
FreeBSD/Linux Kernel Cross Reference
sys/dev/mps/mps_sas.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2009 Yahoo! Inc.
    5  * Copyright (c) 2011-2015 LSI Corp.
    6  * Copyright (c) 2013-2015 Avago Technologies
    7  * All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  *
    30  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD driver
   31  *
   32  * $FreeBSD$
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD$");
   37 
   38 /* Communications core for Avago Technologies (LSI) MPT2 */
   39 
   40 /* TODO Move headers to mpsvar */
   41 #include <sys/types.h>
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/kernel.h>
   45 #include <sys/selinfo.h>
   46 #include <sys/module.h>
   47 #include <sys/bus.h>
   48 #include <sys/conf.h>
   49 #include <sys/bio.h>
   50 #include <sys/malloc.h>
   51 #include <sys/uio.h>
   52 #include <sys/sysctl.h>
   53 #include <sys/endian.h>
   54 #include <sys/queue.h>
   55 #include <sys/kthread.h>
   56 #include <sys/taskqueue.h>
   57 #include <sys/sbuf.h>
   58 
   59 #include <machine/bus.h>
   60 #include <machine/resource.h>
   61 #include <sys/rman.h>
   62 
   63 #include <machine/stdarg.h>
   64 
   65 #include <cam/cam.h>
   66 #include <cam/cam_ccb.h>
   67 #include <cam/cam_xpt.h>
   68 #include <cam/cam_debug.h>
   69 #include <cam/cam_sim.h>
   70 #include <cam/cam_xpt_sim.h>
   71 #include <cam/cam_xpt_periph.h>
   72 #include <cam/cam_periph.h>
   73 #include <cam/scsi/scsi_all.h>
   74 #include <cam/scsi/scsi_message.h>
   75 #include <cam/scsi/smp_all.h>
   76 
   77 #include <dev/mps/mpi/mpi2_type.h>
   78 #include <dev/mps/mpi/mpi2.h>
   79 #include <dev/mps/mpi/mpi2_ioc.h>
   80 #include <dev/mps/mpi/mpi2_sas.h>
   81 #include <dev/mps/mpi/mpi2_cnfg.h>
   82 #include <dev/mps/mpi/mpi2_init.h>
   83 #include <dev/mps/mpi/mpi2_tool.h>
   84 #include <dev/mps/mps_ioctl.h>
   85 #include <dev/mps/mpsvar.h>
   86 #include <dev/mps/mps_table.h>
   87 #include <dev/mps/mps_sas.h>
   88 
   89 /*
   90  * static array to check SCSI OpCode for EEDP protection bits
   91  */
   92 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
   93 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
   94 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
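/*
 * The table below is indexed by the SCSI CDB opcode byte.  A non-zero entry
 * selects the EEDP flags to apply: PRO_R for reads (check/remove protection
 * information), PRO_W for writes (insert protection information), and PRO_V
 * for verify-class opcodes.  For example, 0x28 (READ(10)) maps to PRO_R and
 * 0x2A (WRITE(10)) maps to PRO_W.
 */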
   95 static uint8_t op_code_prot[256] = {
   96         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   97         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   98         0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
   99         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  100         0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  101         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  102         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  103         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  104         0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
  105         0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  106         0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
  107         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  108         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  109         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  110         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  111         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  112 };
  113 
  114 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
  115 
  116 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
  117 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
  118 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
  119 static void mpssas_poll(struct cam_sim *sim);
  120 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
  121     struct mps_command *cm);
  122 static void mpssas_scsiio_timeout(void *data);
  123 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
  124 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
  125     struct mps_command *cm, union ccb *ccb);
  126 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
  127 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
  128 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
  129 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
  130 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
  131                                uint64_t sasaddr);
  132 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
  133 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
  134 static void mpssas_async(void *callback_arg, uint32_t code,
  135                          struct cam_path *path, void *arg);
  136 static int mpssas_send_portenable(struct mps_softc *sc);
  137 static void mpssas_portenable_complete(struct mps_softc *sc,
  138     struct mps_command *cm);
  139 
  140 struct mpssas_target *
  141 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
  142 {
  143         struct mpssas_target *target;
  144         int i;
  145 
  146         for (i = start; i < sassc->maxtargets; i++) {
  147                 target = &sassc->targets[i];
  148                 if (target->handle == handle)
  149                         return (target);
  150         }
  151 
  152         return (NULL);
  153 }
  154 
   155 /* We need to freeze the simq during attach and diag reset to avoid failing
  156  * commands before device handles have been found by discovery.  Since
  157  * discovery involves reading config pages and possibly sending commands,
  158  * discovery actions may continue even after we receive the end of discovery
  159  * event, so refcount discovery actions instead of assuming we can unfreeze
  160  * the simq when we get the event.
  161  */
  162 void
  163 mpssas_startup_increment(struct mpssas_softc *sassc)
  164 {
  165         MPS_FUNCTRACE(sassc->sc);
  166 
  167         if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
  168                 if (sassc->startup_refcount++ == 0) {
  169                         /* just starting, freeze the simq */
  170                         mps_dprint(sassc->sc, MPS_INIT,
  171                             "%s freezing simq\n", __func__);
  172                         xpt_hold_boot();
  173                         xpt_freeze_simq(sassc->sim, 1);
  174                 }
  175                 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
  176                     sassc->startup_refcount);
  177         }
  178 }
  179 
  180 void
  181 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
  182 {
  183         if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
  184                 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
  185                 xpt_release_simq(sassc->sim, 1);
  186                 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
  187         }
  188 }
  189 
  190 void
  191 mpssas_startup_decrement(struct mpssas_softc *sassc)
  192 {
  193         MPS_FUNCTRACE(sassc->sc);
  194 
  195         if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
  196                 if (--sassc->startup_refcount == 0) {
  197                         /* finished all discovery-related actions, release
  198                          * the simq and rescan for the latest topology.
  199                          */
  200                         mps_dprint(sassc->sc, MPS_INIT,
  201                             "%s releasing simq\n", __func__);
  202                         sassc->flags &= ~MPSSAS_IN_STARTUP;
  203                         xpt_release_simq(sassc->sim, 1);
  204                         xpt_release_boot();
  205                 }
  206                 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
  207                     sassc->startup_refcount);
  208         }
  209 }
  210 
  211 /*
  212  * The firmware requires us to stop sending commands when we're doing task
  213  * management.
  214  * XXX The logic for serializing the device has been made lazy and moved to
  215  * mpssas_prepare_for_tm().
  216  */
  217 struct mps_command *
  218 mpssas_alloc_tm(struct mps_softc *sc)
  219 {
  220         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
  221         struct mps_command *tm;
  222 
  223         tm = mps_alloc_high_priority_command(sc);
  224         if (tm == NULL)
  225                 return (NULL);
  226 
  227         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
  228         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
  229         return tm;
  230 }
  231 
  232 void
  233 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
  234 {
  235         if (tm == NULL)
  236                 return;
  237 
  238         /*
  239          * For TM's the devq is frozen for the device.  Unfreeze it here and
  240          * free the resources used for freezing the devq.  Must clear the
  241          * INRESET flag as well or scsi I/O will not work.
  242          */
  243         if (tm->cm_ccb) {
  244                 mps_dprint(sc, MPS_XINFO | MPS_RECOVERY,
  245                     "Unfreezing devq for target ID %d\n",
  246                     tm->cm_targ->tid);
  247                 tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
  248                 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
  249                 xpt_free_path(tm->cm_ccb->ccb_h.path);
  250                 xpt_free_ccb(tm->cm_ccb);
  251         }
  252 
  253         mps_free_high_priority_command(sc, tm);
  254 }
  255 
  256 void
  257 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
  258 {
  259         struct mpssas_softc *sassc = sc->sassc;
  260         path_id_t pathid;
  261         target_id_t targetid;
  262         union ccb *ccb;
  263 
  264         MPS_FUNCTRACE(sc);
  265         pathid = cam_sim_path(sassc->sim);
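        /*
         * CAM target IDs on this SIM are simply indices into the
         * sassc->targets array, so a target pointer converts directly.
         */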
  266         if (targ == NULL)
  267                 targetid = CAM_TARGET_WILDCARD;
  268         else
  269                 targetid = targ - sassc->targets;
  270 
  271         /*
  272          * Allocate a CCB and schedule a rescan.
  273          */
  274         ccb = xpt_alloc_ccb_nowait();
  275         if (ccb == NULL) {
  276                 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
  277                 return;
  278         }
  279 
  280         if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
  281             targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
  282                 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
  283                 xpt_free_ccb(ccb);
  284                 return;
  285         }
  286 
  287         if (targetid == CAM_TARGET_WILDCARD)
  288                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
  289         else
  290                 ccb->ccb_h.func_code = XPT_SCAN_TGT;     
  291 
  292         mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
  293         xpt_rescan(ccb);
  294 }
  295 
  296 static void
  297 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
  298 {
  299         struct sbuf sb;
  300         va_list ap;
  301         char str[224];
  302         char path_str[64];
  303 
  304         if (cm == NULL)
  305                 return;
  306 
  307         /* No need to be in here if debugging isn't enabled */
  308         if ((cm->cm_sc->mps_debug & level) == 0)
  309                 return;
  310 
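        /* Assemble the message in a fixed-length sbuf backed by the on-stack buffer. */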
  311         sbuf_new(&sb, str, sizeof(str), 0);
  312 
  313         va_start(ap, fmt);
  314 
  315         if (cm->cm_ccb != NULL) {
  316                 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
  317                                 sizeof(path_str));
  318                 sbuf_cat(&sb, path_str);
  319                 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
  320                         scsi_command_string(&cm->cm_ccb->csio, &sb);
  321                         sbuf_printf(&sb, "length %d ",
  322                                     cm->cm_ccb->csio.dxfer_len);
  323                 }
  324         }
  325         else {
  326                 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
  327                     cam_sim_name(cm->cm_sc->sassc->sim),
  328                     cam_sim_unit(cm->cm_sc->sassc->sim),
  329                     cam_sim_bus(cm->cm_sc->sassc->sim),
  330                     cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
  331                     cm->cm_lun);
  332         }
  333 
  334         sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
  335         sbuf_vprintf(&sb, fmt, ap);
  336         sbuf_finish(&sb);
  337         mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
  338 
  339         va_end(ap);
  340 }
  341 
  342 static void
  343 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
  344 {
  345         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
  346         struct mpssas_target *targ;
  347         uint16_t handle;
  348 
  349         MPS_FUNCTRACE(sc);
  350 
  351         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
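        /* The device handle was stashed in cm_complete_data when the TM was queued. */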
  352         handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
  353         targ = tm->cm_targ;
  354 
  355         if (reply == NULL) {
  356                 /* XXX retry the remove after the diag reset completes? */
  357                 mps_dprint(sc, MPS_FAULT,
  358                     "%s NULL reply resetting device 0x%04x\n", __func__,
  359                     handle);
  360                 mpssas_free_tm(sc, tm);
  361                 return;
  362         }
  363 
  364         if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
  365             MPI2_IOCSTATUS_SUCCESS) {
  366                 mps_dprint(sc, MPS_ERROR,
  367                    "IOCStatus = 0x%x while resetting device 0x%x\n",
  368                    le16toh(reply->IOCStatus), handle);
  369         }
  370 
  371         mps_dprint(sc, MPS_XINFO,
   372             "Reset aborted %u commands\n", le32toh(reply->TerminationCount));
  373         mps_free_reply(sc, tm->cm_reply_data);
  374         tm->cm_reply = NULL;    /* Ensures the reply won't get re-freed */
  375 
  376         mps_dprint(sc, MPS_XINFO,
  377             "clearing target %u handle 0x%04x\n", targ->tid, handle);
  378 
  379         /*
  380          * Don't clear target if remove fails because things will get confusing.
  381          * Leave the devname and sasaddr intact so that we know to avoid reusing
  382          * this target id if possible, and so we can assign the same target id
  383          * to this device if it comes back in the future.
  384          */
  385         if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
  386             MPI2_IOCSTATUS_SUCCESS) {
  387                 targ = tm->cm_targ;
  388                 targ->handle = 0x0;
  389                 targ->encl_handle = 0x0;
  390                 targ->encl_slot = 0x0;
  391                 targ->exp_dev_handle = 0x0;
  392                 targ->phy_num = 0x0;
  393                 targ->linkrate = 0x0;
  394                 targ->devinfo = 0x0;
  395                 targ->flags = 0x0;
  396         }
  397 
  398         mpssas_free_tm(sc, tm);
  399 }
  400 
  401 /*
   402  * There is no need to issue MPI2_SAS_OP_REMOVE_DEVICE for volume removal;
   403  * otherwise, deleting a volume is handled the same as removing a bare drive.
  404  */
  405 void
  406 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
  407 {
  408         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
  409         struct mps_softc *sc;
  410         struct mps_command *tm;
  411         struct mpssas_target *targ = NULL;
  412 
  413         MPS_FUNCTRACE(sassc->sc);
  414         sc = sassc->sc;
  415 
  416 #ifdef WD_SUPPORT
  417         /*
  418          * If this is a WD controller, determine if the disk should be exposed
  419          * to the OS or not.  If disk should be exposed, return from this
  420          * function without doing anything.
  421          */
  422         if (sc->WD_available && (sc->WD_hide_expose ==
  423             MPS_WD_EXPOSE_ALWAYS)) {
  424                 return;
  425         }
  426 #endif //WD_SUPPORT
  427 
  428         targ = mpssas_find_target_by_handle(sassc, 0, handle);
  429         if (targ == NULL) {
  430                 /* FIXME: what is the action? */
  431                 /* We don't know about this device? */
  432                 mps_dprint(sc, MPS_ERROR,
  433                    "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
  434                 return;
  435         }
  436 
  437         targ->flags |= MPSSAS_TARGET_INREMOVAL;
  438 
  439         tm = mpssas_alloc_tm(sc);
  440         if (tm == NULL) {
  441                 mps_dprint(sc, MPS_ERROR,
  442                     "%s: command alloc failure\n", __func__);
  443                 return;
  444         }
  445 
  446         mpssas_rescan_target(sc, targ);
  447 
  448         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
  449         req->DevHandle = targ->handle;
  450         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  451 
  452         /* SAS Hard Link Reset / SATA Link Reset */
  453         req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
  454 
  455         tm->cm_targ = targ;
  456         tm->cm_data = NULL;
  457         tm->cm_complete = mpssas_remove_volume;
  458         tm->cm_complete_data = (void *)(uintptr_t)handle;
  459 
  460         mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
  461             __func__, targ->tid);
  462         mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
  463 
  464         mps_map_command(sc, tm);
  465 }
  466 
  467 /*
  468  * The MPT2 firmware performs debounce on the link to avoid transient link
   469  * errors and false removals.  When it does decide that the link has been lost
   470  * and a device needs to go away, it expects the host to perform a
  471  * target reset and then an op remove.  The reset has the side-effect of
  472  * aborting any outstanding requests for the device, which is required for
  473  * the op-remove to succeed.  It's not clear if the host should check for
  474  * the device coming back alive after the reset.
  475  */
  476 void
  477 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
  478 {
  479         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
  480         struct mps_softc *sc;
  481         struct mps_command *cm;
  482         struct mpssas_target *targ = NULL;
  483 
  484         MPS_FUNCTRACE(sassc->sc);
  485 
  486         sc = sassc->sc;
  487 
  488         targ = mpssas_find_target_by_handle(sassc, 0, handle);
  489         if (targ == NULL) {
  490                 /* FIXME: what is the action? */
  491                 /* We don't know about this device? */
  492                 mps_dprint(sc, MPS_ERROR,
  493                     "%s : invalid handle 0x%x \n", __func__, handle);
  494                 return;
  495         }
  496 
  497         targ->flags |= MPSSAS_TARGET_INREMOVAL;
  498 
  499         cm = mpssas_alloc_tm(sc);
  500         if (cm == NULL) {
  501                 mps_dprint(sc, MPS_ERROR,
  502                     "%s: command alloc failure\n", __func__);
  503                 return;
  504         }
  505 
  506         mpssas_rescan_target(sc, targ);
  507 
  508         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
  509         req->DevHandle = htole16(targ->handle);
  510         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  511 
  512         /* SAS Hard Link Reset / SATA Link Reset */
  513         req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
  514 
  515         cm->cm_targ = targ;
  516         cm->cm_data = NULL;
  517         cm->cm_complete = mpssas_remove_device;
  518         cm->cm_complete_data = (void *)(uintptr_t)handle;
  519 
  520         mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
  521             __func__, targ->tid);
  522         mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
  523 
  524         mps_map_command(sc, cm);
  525 }
  526 
  527 static void
  528 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
  529 {
  530         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
  531         MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
  532         struct mpssas_target *targ;
  533         uint16_t handle;
  534 
  535         MPS_FUNCTRACE(sc);
  536 
  537         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
  538         handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
  539         targ = tm->cm_targ;
  540 
  541         /*
  542          * Currently there should be no way we can hit this case.  It only
  543          * happens when we have a failure to allocate chain frames, and
  544          * task management commands don't have S/G lists.
  545          */
  546         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
  547                 mps_dprint(sc, MPS_ERROR,
  548                     "%s: cm_flags = %#x for remove of handle %#04x! "
  549                     "This should not happen!\n", __func__, tm->cm_flags,
  550                     handle);
  551         }
  552 
  553         if (reply == NULL) {
  554                 /* XXX retry the remove after the diag reset completes? */
  555                 mps_dprint(sc, MPS_FAULT,
  556                     "%s NULL reply resetting device 0x%04x\n", __func__,
  557                     handle);
  558                 mpssas_free_tm(sc, tm);
  559                 return;
  560         }
  561 
  562         if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
  563             MPI2_IOCSTATUS_SUCCESS) {
  564                 mps_dprint(sc, MPS_ERROR,
  565                    "IOCStatus = 0x%x while resetting device 0x%x\n",
  566                    le16toh(reply->IOCStatus), handle);
  567         }
  568 
  569         mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
  570             le32toh(reply->TerminationCount));
  571         mps_free_reply(sc, tm->cm_reply_data);
  572         tm->cm_reply = NULL;    /* Ensures the reply won't get re-freed */
  573 
  574         /* Reuse the existing command */
  575         req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
  576         memset(req, 0, sizeof(*req));
  577         req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
  578         req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
  579         req->DevHandle = htole16(handle);
  580         tm->cm_data = NULL;
  581         tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
  582         tm->cm_complete = mpssas_remove_complete;
  583         tm->cm_complete_data = (void *)(uintptr_t)handle;
  584 
  585         /*
  586          * Wait to send the REMOVE_DEVICE until all the commands have cleared.
   587          * They should be aborted or time out, and we'll kick this off from
   588          * there if so.
  589          */
  590         if (TAILQ_FIRST(&targ->commands) == NULL) {
  591                 mps_dprint(sc, MPS_INFO,
  592                     "No pending commands: starting remove_device target %u handle 0x%04x\n",
  593                     targ->tid, handle);
  594                 mps_map_command(sc, tm);
  595                 targ->pending_remove_tm = NULL;
  596         } else {
  597                 targ->pending_remove_tm = tm;
  598         }
  599 
  600         mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
  601                    targ->tid, handle);
  602 }
  603 
  604 static void
  605 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
  606 {
  607         MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
  608         uint16_t handle;
  609         struct mpssas_target *targ;
  610         struct mpssas_lun *lun;
  611 
  612         MPS_FUNCTRACE(sc);
  613 
  614         reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
  615         handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
  616         targ = tm->cm_targ;
  617 
  618         /*
  619          * At this point, we should have no pending commands for the target.
   620          * The removal of the target has just completed.
  621          */
  622         KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
  623             ("%s: no commands should be pending\n", __func__));
  624 
  625         /*
  626          * Currently there should be no way we can hit this case.  It only
  627          * happens when we have a failure to allocate chain frames, and
  628          * task management commands don't have S/G lists.
  629          */
  630         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
  631                 mps_dprint(sc, MPS_XINFO,
  632                            "%s: cm_flags = %#x for remove of handle %#04x! "
  633                            "This should not happen!\n", __func__, tm->cm_flags,
  634                            handle);
  635                 mpssas_free_tm(sc, tm);
  636                 return;
  637         }
  638 
  639         if (reply == NULL) {
  640                 /* most likely a chip reset */
  641                 mps_dprint(sc, MPS_FAULT,
  642                     "%s NULL reply removing device 0x%04x\n", __func__, handle);
  643                 mpssas_free_tm(sc, tm);
  644                 return;
  645         }
  646 
  647         mps_dprint(sc, MPS_XINFO,
  648             "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__, 
  649             handle, le16toh(reply->IOCStatus));
  650 
  651         /*
  652          * Don't clear target if remove fails because things will get confusing.
  653          * Leave the devname and sasaddr intact so that we know to avoid reusing
  654          * this target id if possible, and so we can assign the same target id
  655          * to this device if it comes back in the future.
  656          */
  657         if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
  658             MPI2_IOCSTATUS_SUCCESS) {
  659                 targ->handle = 0x0;
  660                 targ->encl_handle = 0x0;
  661                 targ->encl_slot = 0x0;
  662                 targ->exp_dev_handle = 0x0;
  663                 targ->phy_num = 0x0;
  664                 targ->linkrate = 0x0;
  665                 targ->devinfo = 0x0;
  666                 targ->flags = 0x0;
  667                 
   668                 while (!SLIST_EMPTY(&targ->luns)) {
  669                         lun = SLIST_FIRST(&targ->luns);
  670                         SLIST_REMOVE_HEAD(&targ->luns, lun_link);
  671                         free(lun, M_MPT2);
  672                 }
  673         }
  674 
  675         mpssas_free_tm(sc, tm);
  676 }
  677 
  678 static int
  679 mpssas_register_events(struct mps_softc *sc)
  680 {
  681         u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
  682 
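        /*
         * The event mask is MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4) 32-bit
         * words, i.e. the 16 bytes cleared here.
         */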
  683         bzero(events, 16);
  684         setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
  685         setbit(events, MPI2_EVENT_SAS_DISCOVERY);
  686         setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
  687         setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
  688         setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
  689         setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
  690         setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
  691         setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
  692         setbit(events, MPI2_EVENT_IR_VOLUME);
  693         setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
  694         setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
  695         setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
  696 
  697         mps_register_events(sc, events, mpssas_evt_handler, NULL,
  698             &sc->sassc->mpssas_eh);
  699 
  700         return (0);
  701 }
  702 
  703 int
  704 mps_attach_sas(struct mps_softc *sc)
  705 {
  706         struct mpssas_softc *sassc;
  707         cam_status status;
  708         int unit, error = 0, reqs;
  709 
  710         MPS_FUNCTRACE(sc);
  711         mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
  712 
  713         sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
  714 
  715         /*
  716          * XXX MaxTargets could change during a reinit.  Since we don't
  717          * resize the targets[] array during such an event, cache the value
  718          * of MaxTargets here so that we don't get into trouble later.  This
  719          * should move into the reinit logic.
  720          */
  721         sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
  722         sassc->targets = malloc(sizeof(struct mpssas_target) *
  723             sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
  724         sc->sassc = sassc;
  725         sassc->sc = sc;
  726 
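        /*
         * Size the SIM queue from the normal request pool, excluding the
         * high-priority (task management) requests and the one request held
         * in reserve (SMID 0 is not used for I/O).
         */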
  727         reqs = sc->num_reqs - sc->num_prireqs - 1;
  728         if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
  729                 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
  730                 error = ENOMEM;
  731                 goto out;
  732         }
  733 
  734         unit = device_get_unit(sc->mps_dev);
  735         sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
  736             unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
  737         if (sassc->sim == NULL) {
  738                 mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
  739                 error = EINVAL;
  740                 goto out;
  741         }
  742 
  743         TAILQ_INIT(&sassc->ev_queue);
  744 
  745         /* Initialize taskqueue for Event Handling */
  746         TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
  747         sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
  748             taskqueue_thread_enqueue, &sassc->ev_tq);
  749         taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq", 
  750             device_get_nameunit(sc->mps_dev));
  751 
  752         mps_lock(sc);
  753 
  754         /*
  755          * XXX There should be a bus for every port on the adapter, but since
  756          * we're just going to fake the topology for now, we'll pretend that
  757          * everything is just a target on a single bus.
  758          */
  759         if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
  760                 mps_dprint(sc, MPS_INIT|MPS_ERROR,
  761                     "Error %d registering SCSI bus\n", error);
  762                 mps_unlock(sc);
  763                 goto out;
  764         }
  765 
  766         /*
  767          * Assume that discovery events will start right away.
  768          *
  769          * Hold off boot until discovery is complete.
  770          */
  771         sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
  772         sc->sassc->startup_refcount = 0;
  773         mpssas_startup_increment(sassc);
  774 
  775         mps_unlock(sc);
  776 
  777         /*
  778          * Register for async events so we can determine the EEDP
  779          * capabilities of devices.
  780          */
  781         status = xpt_create_path(&sassc->path, /*periph*/NULL,
  782             cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
  783             CAM_LUN_WILDCARD);
  784         if (status != CAM_REQ_CMP) {
  785                 mps_dprint(sc, MPS_ERROR|MPS_INIT,
  786                     "Error %#x creating sim path\n", status);
  787                 sassc->path = NULL;
  788         } else {
  789                 int event;
  790 
  791                 event = AC_ADVINFO_CHANGED;
  792                 status = xpt_register_async(event, mpssas_async, sc,
  793                                             sassc->path);
  794                 if (status != CAM_REQ_CMP) {
  795                         mps_dprint(sc, MPS_ERROR,
  796                             "Error %#x registering async handler for "
  797                             "AC_ADVINFO_CHANGED events\n", status);
  798                         xpt_free_path(sassc->path);
  799                         sassc->path = NULL;
  800                 }
  801         }
  802         if (status != CAM_REQ_CMP) {
  803                 /*
  804                  * EEDP use is the exception, not the rule.
  805                  * Warn the user, but do not fail to attach.
  806                  */
  807                 mps_printf(sc, "EEDP capabilities disabled.\n");
  808         }
  809 
  810         mpssas_register_events(sc);
  811 out:
  812         if (error)
  813                 mps_detach_sas(sc);
  814 
  815         mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
  816         return (error);
  817 }
  818 
  819 int
  820 mps_detach_sas(struct mps_softc *sc)
  821 {
  822         struct mpssas_softc *sassc;
  823         struct mpssas_lun *lun, *lun_tmp;
  824         struct mpssas_target *targ;
  825         int i;
  826 
  827         MPS_FUNCTRACE(sc);
  828 
  829         if (sc->sassc == NULL)
  830                 return (0);
  831 
  832         sassc = sc->sassc;
  833         mps_deregister_events(sc, sassc->mpssas_eh);
  834 
  835         /*
  836          * Drain and free the event handling taskqueue with the lock
  837          * unheld so that any parallel processing tasks drain properly
  838          * without deadlocking.
  839          */
  840         if (sassc->ev_tq != NULL)
  841                 taskqueue_free(sassc->ev_tq);
  842 
  843         /* Deregister our async handler */
  844         if (sassc->path != NULL) {
  845                 xpt_register_async(0, mpssas_async, sc, sassc->path);
  846                 xpt_free_path(sassc->path);
  847                 sassc->path = NULL;
  848         }
  849 
  850         /* Make sure CAM doesn't wedge if we had to bail out early. */
  851         mps_lock(sc);
  852 
  853         while (sassc->startup_refcount != 0)
  854                 mpssas_startup_decrement(sassc);
  855 
  856         if (sassc->flags & MPSSAS_IN_STARTUP)
  857                 xpt_release_simq(sassc->sim, 1);
  858 
  859         if (sassc->sim != NULL) {
  860                 xpt_bus_deregister(cam_sim_path(sassc->sim));
  861                 cam_sim_free(sassc->sim, FALSE);
  862         }
  863 
  864         mps_unlock(sc);
  865 
  866         if (sassc->devq != NULL)
  867                 cam_simq_free(sassc->devq);
  868 
   869         for (i = 0; i < sassc->maxtargets; i++) {
  870                 targ = &sassc->targets[i];
  871                 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
  872                         free(lun, M_MPT2);
  873                 }
  874         }
  875         free(sassc->targets, M_MPT2);
  876         free(sassc, M_MPT2);
  877         sc->sassc = NULL;
  878 
  879         return (0);
  880 }
  881 
  882 void
  883 mpssas_discovery_end(struct mpssas_softc *sassc)
  884 {
  885         struct mps_softc *sc = sassc->sc;
  886 
  887         MPS_FUNCTRACE(sc);
  888 
  889         /*
  890          * After discovery has completed, check the mapping table for any
  891          * missing devices and update their missing counts. Only do this once
  892          * whenever the driver is initialized so that missing counts aren't
  893          * updated unnecessarily. Note that just because discovery has
  894          * completed doesn't mean that events have been processed yet. The
  895          * check_devices function is a callout timer that checks if ALL devices
  896          * are missing. If so, it will wait a little longer for events to
  897          * complete and keep resetting itself until some device in the mapping
  898          * table is not missing, meaning that event processing has started.
  899          */
  900         if (sc->track_mapping_events) {
  901                 mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
  902                     "completed. Check for missing devices in the mapping "
  903                     "table.\n");
  904                 callout_reset(&sc->device_check_callout,
  905                     MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
  906                     sc);
  907         }
  908 }
  909 
  910 static void
  911 mpssas_action(struct cam_sim *sim, union ccb *ccb)
  912 {
  913         struct mpssas_softc *sassc;
  914 
  915         sassc = cam_sim_softc(sim);
  916 
  917         MPS_FUNCTRACE(sassc->sc);
  918         mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
  919             ccb->ccb_h.func_code);
  920         mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
  921 
  922         switch (ccb->ccb_h.func_code) {
  923         case XPT_PATH_INQ:
  924         {
  925                 struct ccb_pathinq *cpi = &ccb->cpi;
  926                 struct mps_softc *sc = sassc->sc;
  927 
  928                 cpi->version_num = 1;
  929                 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
  930                 cpi->target_sprt = 0;
  931                 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
  932                 cpi->hba_eng_cnt = 0;
  933                 cpi->max_target = sassc->maxtargets - 1;
  934                 cpi->max_lun = 255;
  935 
  936                 /*
  937                  * initiator_id is set here to an ID outside the set of valid
  938                  * target IDs (including volumes).
  939                  */
  940                 cpi->initiator_id = sassc->maxtargets;
  941                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
  942                 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
  943                 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
  944                 cpi->unit_number = cam_sim_unit(sim);
  945                 cpi->bus_id = cam_sim_bus(sim);
  946                 cpi->base_transfer_speed = 150000;
  947                 cpi->transport = XPORT_SAS;
  948                 cpi->transport_version = 0;
  949                 cpi->protocol = PROTO_SCSI;
  950                 cpi->protocol_version = SCSI_REV_SPC;
  951                 cpi->maxio = sc->maxio;
  952                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
  953                 break;
  954         }
  955         case XPT_GET_TRAN_SETTINGS:
  956         {
  957                 struct ccb_trans_settings       *cts;
  958                 struct ccb_trans_settings_sas   *sas;
  959                 struct ccb_trans_settings_scsi  *scsi;
  960                 struct mpssas_target *targ;
  961 
  962                 cts = &ccb->cts;
  963                 sas = &cts->xport_specific.sas;
  964                 scsi = &cts->proto_specific.scsi;
  965 
  966                 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
  967                     ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
  968                     cts->ccb_h.target_id));
  969                 targ = &sassc->targets[cts->ccb_h.target_id];
  970                 if (targ->handle == 0x0) {
  971                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
  972                         break;
  973                 }
  974 
  975                 cts->protocol_version = SCSI_REV_SPC2;
  976                 cts->transport = XPORT_SAS;
  977                 cts->transport_version = 0;
  978 
  979                 sas->valid = CTS_SAS_VALID_SPEED;
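                /* MPI2 negotiated link-rate codes: 0x08 = 1.5Gbps, 0x09 = 3Gbps, 0x0a = 6Gbps. */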
  980                 switch (targ->linkrate) {
  981                 case 0x08:
  982                         sas->bitrate = 150000;
  983                         break;
  984                 case 0x09:
  985                         sas->bitrate = 300000;
  986                         break;
  987                 case 0x0a:
  988                         sas->bitrate = 600000;
  989                         break;
  990                 default:
  991                         sas->valid = 0;
  992                 }
  993 
  994                 cts->protocol = PROTO_SCSI;
  995                 scsi->valid = CTS_SCSI_VALID_TQ;
  996                 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
  997 
  998                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
  999                 break;
 1000         }
 1001         case XPT_CALC_GEOMETRY:
 1002                 cam_calc_geometry(&ccb->ccg, /*extended*/1);
 1003                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1004                 break;
 1005         case XPT_RESET_DEV:
 1006                 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
 1007                 mpssas_action_resetdev(sassc, ccb);
 1008                 return;
 1009         case XPT_RESET_BUS:
 1010         case XPT_ABORT:
 1011         case XPT_TERM_IO:
 1012                 mps_dprint(sassc->sc, MPS_XINFO,
 1013                     "mpssas_action faking success for abort or reset\n");
 1014                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1015                 break;
 1016         case XPT_SCSI_IO:
 1017                 mpssas_action_scsiio(sassc, ccb);
 1018                 return;
 1019         case XPT_SMP_IO:
 1020                 mpssas_action_smpio(sassc, ccb);
 1021                 return;
 1022         default:
 1023                 mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
 1024                 break;
 1025         }
 1026         xpt_done(ccb);
 1027 
 1028 }
 1029 
 1030 static void
 1031 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
 1032     target_id_t target_id, lun_id_t lun_id)
 1033 {
 1034         path_id_t path_id = cam_sim_path(sc->sassc->sim);
 1035         struct cam_path *path;
 1036 
 1037         mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
 1038             ac_code, target_id, (uintmax_t)lun_id);
 1039 
 1040         if (xpt_create_path(&path, NULL, 
 1041                 path_id, target_id, lun_id) != CAM_REQ_CMP) {
 1042                 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
 1043                            "notification\n");
 1044                 return;
 1045         }
 1046 
 1047         xpt_async(ac_code, path, NULL);
 1048         xpt_free_path(path);
 1049 }
 1050 
 1051 static void 
 1052 mpssas_complete_all_commands(struct mps_softc *sc)
 1053 {
 1054         struct mps_command *cm;
 1055         int i;
 1056         int completed;
 1057 
 1058         MPS_FUNCTRACE(sc);
 1059         mtx_assert(&sc->mps_mtx, MA_OWNED);
 1060 
 1061         /* complete all commands with a NULL reply */
 1062         for (i = 1; i < sc->num_reqs; i++) {
 1063                 cm = &sc->commands[i];
 1064                 if (cm->cm_state == MPS_CM_STATE_FREE)
 1065                         continue;
 1066 
 1067                 cm->cm_state = MPS_CM_STATE_BUSY;
 1068                 cm->cm_reply = NULL;
 1069                 completed = 0;
 1070 
 1071                 if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
 1072                         MPASS(cm->cm_data);
 1073                         free(cm->cm_data, M_MPT2);
 1074                         cm->cm_data = NULL;
 1075                 }
 1076 
 1077                 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
 1078                         cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
 1079 
 1080                 if (cm->cm_complete != NULL) {
 1081                         mpssas_log_command(cm, MPS_RECOVERY,
 1082                             "completing cm %p state %x ccb %p for diag reset\n",
 1083                             cm, cm->cm_state, cm->cm_ccb);
 1084 
 1085                         cm->cm_complete(sc, cm);
 1086                         completed = 1;
 1087                 } else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
 1088                         mpssas_log_command(cm, MPS_RECOVERY,
 1089                             "waking up cm %p state %x ccb %p for diag reset\n", 
 1090                             cm, cm->cm_state, cm->cm_ccb);
 1091                         wakeup(cm);
 1092                         completed = 1;
 1093                 }
 1094 
 1095                 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
 1096                         /* this should never happen, but if it does, log */
 1097                         mpssas_log_command(cm, MPS_RECOVERY,
 1098                             "cm %p state %x flags 0x%x ccb %p during diag "
 1099                             "reset\n", cm, cm->cm_state, cm->cm_flags,
 1100                             cm->cm_ccb);
 1101                 }
 1102         }
 1103 
 1104         sc->io_cmds_active = 0;
 1105 }
 1106 
 1107 void
 1108 mpssas_handle_reinit(struct mps_softc *sc)
 1109 {
 1110         int i;
 1111 
 1112         /* Go back into startup mode and freeze the simq, so that CAM
 1113          * doesn't send any commands until after we've rediscovered all
 1114          * targets and found the proper device handles for them.
 1115          *
 1116          * After the reset, portenable will trigger discovery, and after all
 1117          * discovery-related activities have finished, the simq will be
 1118          * released.
 1119          */
 1120         mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
 1121         sc->sassc->flags |= MPSSAS_IN_STARTUP;
 1122         sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
 1123         mpssas_startup_increment(sc->sassc);
 1124 
 1125         /* notify CAM of a bus reset */
 1126         mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, 
 1127             CAM_LUN_WILDCARD);
 1128 
 1129         /* complete and cleanup after all outstanding commands */
 1130         mpssas_complete_all_commands(sc);
 1131 
 1132         mps_dprint(sc, MPS_INIT,
 1133             "%s startup %u after command completion\n", __func__,
 1134             sc->sassc->startup_refcount);
 1135 
 1136         /* zero all the target handles, since they may change after the
 1137          * reset, and we have to rediscover all the targets and use the new
 1138          * handles.  
 1139          */
 1140         for (i = 0; i < sc->sassc->maxtargets; i++) {
 1141                 if (sc->sassc->targets[i].outstanding != 0)
 1142                         mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n", 
 1143                             i, sc->sassc->targets[i].outstanding);
 1144                 sc->sassc->targets[i].handle = 0x0;
 1145                 sc->sassc->targets[i].exp_dev_handle = 0x0;
 1146                 sc->sassc->targets[i].outstanding = 0;
 1147                 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
 1148         }
 1149 }
 1150 
 1151 static void
 1152 mpssas_tm_timeout(void *data)
 1153 {
 1154         struct mps_command *tm = data;
 1155         struct mps_softc *sc = tm->cm_sc;
 1156 
 1157         mtx_assert(&sc->mps_mtx, MA_OWNED);
 1158 
 1159         mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
 1160             "task mgmt %p timed out\n", tm);
 1161 
 1162         KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
 1163             ("command not inqueue, state = %u\n", tm->cm_state));
 1164 
 1165         tm->cm_state = MPS_CM_STATE_BUSY;
 1166         mps_reinit(sc);
 1167 }
 1168 
 1169 static void
 1170 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
 1171 {
 1172         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
 1173         unsigned int cm_count = 0;
 1174         struct mps_command *cm;
 1175         struct mpssas_target *targ;
 1176 
 1177         callout_stop(&tm->cm_callout);
 1178 
 1179         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 1180         targ = tm->cm_targ;
 1181 
 1182         /*
 1183          * Currently there should be no way we can hit this case.  It only
 1184          * happens when we have a failure to allocate chain frames, and
 1185          * task management commands don't have S/G lists.
 1186          * XXXSL So should it be an assertion?
 1187          */
 1188         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 1189                 mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
 1190                     "%s: cm_flags = %#x for LUN reset! "
 1191                    "This should not happen!\n", __func__, tm->cm_flags);
 1192                 mpssas_free_tm(sc, tm);
 1193                 return;
 1194         }
 1195 
 1196         if (reply == NULL) {
 1197                 mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
 1198                     tm);
 1199                 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
 1200                         /* this completion was due to a reset, just cleanup */
 1201                         mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
 1202                             "reset, ignoring NULL LUN reset reply\n");
 1203                         targ->tm = NULL;
 1204                         mpssas_free_tm(sc, tm);
 1205                 }
 1206                 else {
 1207                         /* we should have gotten a reply. */
 1208                         mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
 1209                             "LUN reset attempt, resetting controller\n");
 1210                         mps_reinit(sc);
 1211                 }
 1212                 return;
 1213         }
 1214 
 1215         mps_dprint(sc, MPS_RECOVERY,
 1216             "logical unit reset status 0x%x code 0x%x count %u\n",
 1217             le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
 1218             le32toh(reply->TerminationCount));
 1219                 
 1220         /*
 1221          * See if there are any outstanding commands for this LUN.
 1222          * This could be made more efficient by using a per-LU data
 1223          * structure of some sort.
 1224          */
 1225         TAILQ_FOREACH(cm, &targ->commands, cm_link) {
 1226                 if (cm->cm_lun == tm->cm_lun)
 1227                         cm_count++;
 1228         }
 1229 
 1230         if (cm_count == 0) {
 1231                 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
 1232                     "Finished recovery after LUN reset for target %u\n",
 1233                     targ->tid);
 1234 
 1235                 mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);
 1236 
 1237                 /*
 1238                  * We've finished recovery for this logical unit.  check and
 1239                  * see if some other logical unit has a timedout command
 1240                  * that needs to be processed.
 1241                  */
 1242                 cm = TAILQ_FIRST(&targ->timedout_commands);
 1243                 if (cm) {
 1244                         mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
 1245                             "More commands to abort for target %u\n",
 1246                             targ->tid);
 1247                         mpssas_send_abort(sc, tm, cm);
 1248                 } else {
 1249                         targ->tm = NULL;
 1250                         mpssas_free_tm(sc, tm);
 1251                 }
 1252         } else {
 1253                 /*
 1254                  * If we still have commands for this LUN, the reset
 1255                  * effectively failed, regardless of the status reported.
 1256                  * Escalate to a target reset.
 1257                  */
 1258                 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
 1259                     "logical unit reset complete for target %u, but still "
 1260                     "have %u command(s), sending target reset\n", targ->tid,
 1261                     cm_count);
 1262                 mpssas_send_reset(sc, tm,
 1263                     MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
 1264         }
 1265 }
 1266 
 1267 static void
 1268 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
 1269 {
 1270         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
 1271         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1272         struct mpssas_target *targ;
 1273 
 1274         callout_stop(&tm->cm_callout);
 1275 
 1276         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1277         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 1278         targ = tm->cm_targ;
 1279 
 1280         /*
 1281          * Currently there should be no way we can hit this case.  It only
 1282          * happens when we have a failure to allocate chain frames, and
 1283          * task management commands don't have S/G lists.
 1284          */
 1285         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 1286                 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
 1287                            "This should not happen!\n", __func__, tm->cm_flags);
 1288                 mpssas_free_tm(sc, tm);
 1289                 return;
 1290         }
 1291 
 1292         if (reply == NULL) {
 1293                 mps_dprint(sc, MPS_RECOVERY,
  1294                     "NULL target reset reply for tm %p TaskMID %u\n",
 1295                     tm, le16toh(req->TaskMID));
 1296                 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
 1297                         /* this completion was due to a reset, just cleanup */
 1298                         mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
 1299                             "reset, ignoring NULL target reset reply\n");
 1300                         targ->tm = NULL;
 1301                         mpssas_free_tm(sc, tm);
 1302                 } else {
 1303                         /* we should have gotten a reply. */
 1304                         mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
 1305                             "target reset attempt, resetting controller\n");
 1306                         mps_reinit(sc);
 1307                 }
 1308                 return;
 1309         }
 1310 
 1311         mps_dprint(sc, MPS_RECOVERY,
 1312             "target reset status 0x%x code 0x%x count %u\n",
 1313             le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
 1314             le32toh(reply->TerminationCount));
 1315 
 1316         if (targ->outstanding == 0) {
 1317                 /* we've finished recovery for this target and all
 1318                  * of its logical units.
 1319                  */
 1320                 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
 1321                     "Finished reset recovery for target %u\n", targ->tid);
 1322 
 1323                 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
 1324                     CAM_LUN_WILDCARD);
 1325 
 1326                 targ->tm = NULL;
 1327                 mpssas_free_tm(sc, tm);
 1328         } else {
 1329                 /*
 1330                  * After a target reset, if this target still has
 1331                  * outstanding commands, the reset effectively failed,
 1332                  * regardless of the status reported.  escalate.
 1333                  */
 1334                 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
 1335                     "Target reset complete for target %u, but still have %u "
 1336                     "command(s), resetting controller\n", targ->tid,
 1337                     targ->outstanding);
 1338                 mps_reinit(sc);
 1339         }
 1340 }
 1341 
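       /*
        * Summary of the recovery escalation implemented by the completion
        * handlers in this file: a timed-out SCSI command is first aborted
        * with ABORT TASK (mpssas_send_abort); if the abort does not clear
        * the timed-out command, mpssas_abort_complete() escalates to a
        * logical unit reset; if the LUN still has commands after that,
        * mpssas_logical_unit_reset_complete() escalates to a target reset;
        * and if the target still has outstanding commands after the target
        * reset, mpssas_target_reset_complete() falls back to a full
        * controller reinit via mps_reinit().
        */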
 1342 #define MPS_RESET_TIMEOUT 30
 1343 
 1344 int
 1345 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
 1346 {
 1347         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1348         struct mpssas_target *target;
 1349         int err;
 1350 
 1351         target = tm->cm_targ;
 1352         if (target->handle == 0) {
 1353                 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
 1354                     __func__, target->tid);
 1355                 return -1;
 1356         }
 1357 
 1358         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1359         req->DevHandle = htole16(target->handle);
 1360         req->TaskType = type;
 1361 
 1362         if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
 1363                 /* XXX Need to handle invalid LUNs */
 1364                 MPS_SET_LUN(req->LUN, tm->cm_lun);
 1365                 tm->cm_targ->logical_unit_resets++;
 1366                 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
 1367                     "Sending logical unit reset to target %u lun %d\n",
 1368                     target->tid, tm->cm_lun);
 1369                 tm->cm_complete = mpssas_logical_unit_reset_complete;
 1370                 mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
 1371         } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
 1372                 /*
 1373                  * Target reset method =
 1374                  *      SAS Hard Link Reset / SATA Link Reset
 1375                  */
 1376                 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
 1377                 tm->cm_targ->target_resets++;
 1378                 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
 1379                     "Sending target reset to target %u\n", target->tid);
 1380                 tm->cm_complete = mpssas_target_reset_complete;
 1381                 mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
 1382         } else {
 1383                 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
 1384                 return -1;
 1385         }
 1386 
 1387         tm->cm_data = NULL;
 1388         tm->cm_complete_data = (void *)tm;
 1389 
 1390         callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
 1391             mpssas_tm_timeout, tm);
 1392 
 1393         err = mps_map_command(sc, tm);
 1394         if (err)
 1395                 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
 1396                     "error %d sending reset type %u\n",
 1397                     err, type);
 1398 
 1399         return err;
 1400 }
 1401 
 1402 static void
 1403 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
 1404 {
 1405         struct mps_command *cm;
 1406         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
 1407         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1408         struct mpssas_target *targ;
 1409 
 1410         callout_stop(&tm->cm_callout);
 1411 
 1412         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1413         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 1414         targ = tm->cm_targ;
 1415 
 1416         /*
 1417          * Currently there should be no way we can hit this case.  It only
 1418          * happens when we have a failure to allocate chain frames, and
 1419          * task management commands don't have S/G lists.
 1420          */
 1421         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 1422                 mps_dprint(sc, MPS_RECOVERY,
 1423                     "cm_flags = %#x for abort %p TaskMID %u!\n", 
 1424                     tm->cm_flags, tm, le16toh(req->TaskMID));
 1425                 mpssas_free_tm(sc, tm);
 1426                 return;
 1427         }
 1428 
 1429         if (reply == NULL) {
 1430                 mps_dprint(sc, MPS_RECOVERY,
 1431                     "NULL abort reply for tm %p TaskMID %u\n", 
 1432                     tm, le16toh(req->TaskMID));
 1433                 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
 1434                         /* this completion was due to a reset, just cleanup */
 1435                         mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
 1436                             "reset, ignoring NULL abort reply\n");
 1437                         targ->tm = NULL;
 1438                         mpssas_free_tm(sc, tm);
 1439                 } else {
 1440                         /* we should have gotten a reply. */
 1441                         mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
 1442                             "abort attempt, resetting controller\n");
 1443                         mps_reinit(sc);
 1444                 }
 1445                 return;
 1446         }
 1447 
 1448         mps_dprint(sc, MPS_RECOVERY,
 1449             "abort TaskMID %u status 0x%x code 0x%x count %u\n",
 1450             le16toh(req->TaskMID),
 1451             le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
 1452             le32toh(reply->TerminationCount));
 1453 
 1454         cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
 1455         if (cm == NULL) {
 1456                 /*
 1457                  * If there are no more timedout commands, we're done with
 1458                  * error recovery for this target.
 1459                  */
 1460                 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
 1461                     "Finished abort recovery for target %u\n", targ->tid);
 1462 
 1463                 targ->tm = NULL;
 1464                 mpssas_free_tm(sc, tm);
 1465         } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
 1466                 /* abort success, but we have more timedout commands to abort */
 1467                 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
 1468                     "Continuing abort recovery for target %u\n", targ->tid);
 1469                 
 1470                 mpssas_send_abort(sc, tm, cm);
 1471         } else {
 1472                 /* we didn't get a command completion, so the abort
 1473                  * failed as far as we're concerned.  escalate.
 1474                  */
 1475                 mps_dprint(sc, MPS_RECOVERY,
 1476                     "Abort failed for target %u, sending logical unit reset\n",
 1477                     targ->tid);
 1478 
 1479                 mpssas_send_reset(sc, tm, 
 1480                     MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
 1481         }
 1482 }
 1483 
 1484 #define MPS_ABORT_TIMEOUT 5
 1485 
 1486 static int
 1487 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
 1488 {
 1489         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1490         struct mpssas_target *targ;
 1491         int err;
 1492 
 1493         targ = cm->cm_targ;
 1494         if (targ->handle == 0) {
 1495                 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
 1496                     "%s null devhandle for target_id %d\n",
 1497                     __func__, cm->cm_ccb->ccb_h.target_id);
 1498                 return -1;
 1499         }
 1500 
 1501         mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
 1502             "Aborting command %p\n", cm);
 1503 
 1504         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1505         req->DevHandle = htole16(targ->handle);
 1506         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
 1507 
 1508         /* XXX Need to handle invalid LUNs */
 1509         MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
 1510 
 1511         req->TaskMID = htole16(cm->cm_desc.Default.SMID);
 1512 
 1513         tm->cm_data = NULL;
 1514         tm->cm_complete = mpssas_abort_complete;
 1515         tm->cm_complete_data = (void *)tm;
 1516         tm->cm_targ = cm->cm_targ;
 1517         tm->cm_lun = cm->cm_lun;
 1518 
 1519         callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
 1520             mpssas_tm_timeout, tm);
 1521 
 1522         targ->aborts++;
 1523 
 1524         mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
 1525 
 1526         err = mps_map_command(sc, tm);
 1527         if (err)
 1528                 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
 1529                     "error %d sending abort for cm %p SMID %u\n",
 1530                     err, cm, le16toh(req->TaskMID));
 1531         return err;
 1532 }
 1533 
 1534 static void
 1535 mpssas_scsiio_timeout(void *data)
 1536 {
 1537         sbintime_t elapsed, now;
 1538         union ccb *ccb;
 1539         struct mps_softc *sc;
 1540         struct mps_command *cm;
 1541         struct mpssas_target *targ;
 1542 
 1543         cm = (struct mps_command *)data;
 1544         sc = cm->cm_sc;
 1545         ccb = cm->cm_ccb;
 1546         now = sbinuptime();
 1547 
 1548         MPS_FUNCTRACE(sc);
 1549         mtx_assert(&sc->mps_mtx, MA_OWNED);
 1550 
 1551         mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", cm);
 1552 
 1553         /*
 1554          * Run the interrupt handler to make sure it's not pending.  This
 1555          * isn't perfect because the command could have already completed
 1556          * and been re-used, though this is unlikely.
 1557          */
 1558         mps_intr_locked(sc);
 1559         if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
 1560                 mpssas_log_command(cm, MPS_XINFO,
 1561                     "SCSI command %p almost timed out\n", cm);
 1562                 return;
 1563         }
 1564 
 1565         if (cm->cm_ccb == NULL) {
 1566                 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
 1567                 return;
 1568         }
 1569 
 1570         targ = cm->cm_targ;
 1571         targ->timeouts++;
 1572 
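               /*
                * sim_data was stamped with sbinuptime() when the I/O was
                * queued (see mpssas_action_scsiio below), so elapsed is an
                * sbintime_t: a 64-bit fixed-point value whose upper 32 bits
                * are whole seconds (sbintime_getsec) and whose low 32 bits
                * are the fraction printed below.
                */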
 1573         elapsed = now - ccb->ccb_h.qos.sim_data;
 1574         mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
 1575             "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
 1576             targ->tid, targ->handle, ccb->ccb_h.timeout,
 1577             sbintime_getsec(elapsed), elapsed & 0xffffffff);
 1578 
 1579         /* XXX first, check the firmware state, to see if it's still
 1580          * operational.  if not, do a diag reset.
 1581          */
 1582         mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
 1583         cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
 1584         TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
 1585 
 1586         if (targ->tm != NULL) {
 1587                 /* target already in recovery, just queue up another
 1588                  * timedout command to be processed later.
 1589                  */
 1590                 mps_dprint(sc, MPS_RECOVERY,
 1591                     "queued timedout cm %p for processing by tm %p\n",
 1592                     cm, targ->tm);
 1593         } else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
 1594                 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
 1595                     "Sending abort to target %u for SMID %d\n", targ->tid,
 1596                     cm->cm_desc.Default.SMID);
 1597                 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
 1598                     cm, targ->tm);
 1599 
 1600                 /* start recovery by aborting the first timedout command */
 1601                 mpssas_send_abort(sc, targ->tm, cm);
 1602         } else {
 1603                 /* XXX queue this target up for recovery once a TM becomes
 1604                  * available.  The firmware only has a limited number of
 1605                  * HighPriority credits for the high priority requests used
 1606                  * for task management, and we ran out.
 1607                  * 
 1608                  * Isilon: don't worry about this for now, since we have
 1609                  * more credits than disks in an enclosure, and limit
 1610                  * ourselves to one TM per target for recovery.
 1611                  */
 1612                 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
 1613                     "timedout cm %p failed to allocate a tm\n", cm);
 1614         }
 1615 
 1616 }
 1617 
 1618 static void
 1619 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
 1620 {
 1621         MPI2_SCSI_IO_REQUEST *req;
 1622         struct ccb_scsiio *csio;
 1623         struct mps_softc *sc;
 1624         struct mpssas_target *targ;
 1625         struct mpssas_lun *lun;
 1626         struct mps_command *cm;
 1627         uint8_t i, lba_byte, *ref_tag_addr;
 1628         uint16_t eedp_flags;
 1629         uint32_t mpi_control;
 1630 
 1631         sc = sassc->sc;
 1632         MPS_FUNCTRACE(sc);
 1633         mtx_assert(&sc->mps_mtx, MA_OWNED);
 1634 
 1635         csio = &ccb->csio;
 1636         KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
 1637             ("Target %d out of bounds in XPT_SCSI_IO\n",
 1638              csio->ccb_h.target_id));
 1639         targ = &sassc->targets[csio->ccb_h.target_id];
 1640         mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
 1641         if (targ->handle == 0x0) {
 1642                 if (targ->flags & MPSSAS_TARGET_INDIAGRESET) {
 1643                         mps_dprint(sc, MPS_ERROR,
 1644                             "%s NULL handle for target %u in diag reset freezing queue\n",
 1645                             __func__, csio->ccb_h.target_id);
 1646                         ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
 1647                         xpt_freeze_devq(ccb->ccb_h.path, 1);
 1648                         xpt_done(ccb);
 1649                         return;
 1650                 }
 1651                 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n", 
 1652                     __func__, csio->ccb_h.target_id);
 1653                 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 1654                 xpt_done(ccb);
 1655                 return;
 1656         }
 1657         if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
 1658                 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
 1659                     "supported %u\n", __func__, csio->ccb_h.target_id);
 1660                 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 1661                 xpt_done(ccb);
 1662                 return;
 1663         }
 1664         /*
 1665          * Sometimes, it is possible to get a command that is not "In
 1666          * Progress" and was actually aborted by the upper layer.  Check for
 1667          * this here and complete the command without error.
 1668          */
 1669         if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
 1670                 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
 1671                     "target %u\n", __func__, csio->ccb_h.target_id);
 1672                 xpt_done(ccb);
 1673                 return;
 1674         }
 1675         /*
 1676          * If devinfo is 0 this will be a volume.  In that case don't tell CAM
 1677          * that the volume has timed out.  We want volumes to be enumerated
 1678          * until they are deleted/removed, not just failed. In either event,
 1679          * we're removing the target due to a firmware event telling us
 1680          * the device is now gone (as opposed to some transient event). Since
 1681          * we're opting to remove failed devices from the OS's view, we need
 1682          * to propagate that status up the stack.
 1683          */
 1684         if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
 1685                 if (targ->devinfo == 0)
 1686                         mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1687                 else
 1688                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 1689                 xpt_done(ccb);
 1690                 return;
 1691         }
 1692 
 1693         if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
 1694                 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
 1695                 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 1696                 xpt_done(ccb);
 1697                 return;
 1698         }
 1699 
 1700         /*
 1701          * If target has a reset in progress, the devq should be frozen.
 1702          * Getting here we likely hit a race, so just requeue.
 1703          */
 1704         if (targ->flags & MPSSAS_TARGET_INRESET) {
 1705                 ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
 1706                 mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
 1707                     __func__, targ->tid);
 1708                 xpt_freeze_devq(ccb->ccb_h.path, 1);
 1709                 xpt_done(ccb);
 1710                 return;
 1711         }
 1712 
 1713         cm = mps_alloc_command(sc);
 1714         if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
 1715                 if (cm != NULL) {
 1716                         mps_free_command(sc, cm);
 1717                 }
 1718                 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
 1719                         xpt_freeze_simq(sassc->sim, 1);
 1720                         sassc->flags |= MPSSAS_QUEUE_FROZEN;
 1721                 }
 1722                 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 1723                 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
 1724                 xpt_done(ccb);
 1725                 return;
 1726         }
 1727 
 1728         req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
 1729         bzero(req, sizeof(*req));
 1730         req->DevHandle = htole16(targ->handle);
 1731         req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
 1732         req->MsgFlags = 0;
 1733         req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
 1734         req->SenseBufferLength = MPS_SENSE_LEN;
 1735         req->SGLFlags = 0;
 1736         req->ChainOffset = 0;
 1737         req->SGLOffset0 = 24;   /* 32bit word offset to the SGL */
 1738         req->SGLOffset1 = 0;
 1739         req->SGLOffset2 = 0;
 1740         req->SGLOffset3 = 0;
 1741         req->SkipCount = 0;
 1742         req->DataLength = htole32(csio->dxfer_len);
 1743         req->BidirectionalDataLength = 0;
 1744         req->IoFlags = htole16(csio->cdb_len);
 1745         req->EEDPFlags = 0;
 1746 
 1747         /* Note: BiDirectional transfers are not supported */
 1748         switch (csio->ccb_h.flags & CAM_DIR_MASK) {
 1749         case CAM_DIR_IN:
 1750                 mpi_control = MPI2_SCSIIO_CONTROL_READ;
 1751                 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
 1752                 break;
 1753         case CAM_DIR_OUT:
 1754                 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
 1755                 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
 1756                 break;
 1757         case CAM_DIR_NONE:
 1758         default:
 1759                 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
 1760                 break;
 1761         }
 1762 
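               /*
                * The additional CDB length field appears to be counted in
                * 32-bit words: a 32-byte CDB has 16 bytes beyond the basic
                * 16-byte CDB area, hence the value 4 below.
                */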
 1763         if (csio->cdb_len == 32)
 1764                 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
 1765         /*
 1766          * It looks like the hardware doesn't require an explicit tag
 1767          * number for each transaction.  SAM Task Management not supported
 1768          * at the moment.
 1769          */
 1770         switch (csio->tag_action) {
 1771         case MSG_HEAD_OF_Q_TAG:
 1772                 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
 1773                 break;
 1774         case MSG_ORDERED_Q_TAG:
 1775                 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
 1776                 break;
 1777         case MSG_ACA_TASK:
 1778                 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
 1779                 break;
 1780         case CAM_TAG_ACTION_NONE:
 1781         case MSG_SIMPLE_Q_TAG:
 1782         default:
 1783                 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
 1784                 break;
 1785         }
 1786         mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT) &
 1787             MPI2_SCSIIO_CONTROL_TASKPRI_MASK;
 1788         mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
 1789         req->Control = htole32(mpi_control);
 1790         if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
 1791                 mps_free_command(sc, cm);
 1792                 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
 1793                 xpt_done(ccb);
 1794                 return;
 1795         }
 1796 
 1797         if (csio->ccb_h.flags & CAM_CDB_POINTER)
 1798                 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
 1799         else
 1800                 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
 1801         req->IoFlags = htole16(csio->cdb_len);
 1802 
 1803         /*
 1804          * Check if EEDP is supported and enabled.  If it is then check if the
 1805          * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
 1806          * is formatted for EEDP support.  If all of this is true, set CDB up
 1807          * for EEDP transfer.
 1808          */
 1809         eedp_flags = op_code_prot[req->CDB.CDB32[0]];
 1810         if (sc->eedp_enabled && eedp_flags) {
 1811                 SLIST_FOREACH(lun, &targ->luns, lun_link) {
 1812                         if (lun->lun_id == csio->ccb_h.target_lun) {
 1813                                 break;
 1814                         }
 1815                 }
 1816 
 1817                 if ((lun != NULL) && (lun->eedp_formatted)) {
 1818                         req->EEDPBlockSize = htole16(lun->eedp_block_size);
 1819                         eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
 1820                             MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
 1821                             MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
 1822                         req->EEDPFlags = htole16(eedp_flags);
 1823 
 1824                         /*
 1825                          * If CDB less than 32, fill in Primary Ref Tag with
 1826                          * low 4 bytes of LBA.  If CDB is 32, tag stuff is
 1827                          * already there.  Also, set protection bit.  FreeBSD
 1828                          * currently does not support CDBs bigger than 16, but
 1829                          * the code doesn't hurt, and will be here for the
 1830                          * future.
 1831                          */
 1832                         if (csio->cdb_len != 32) {
 1833                                 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
 1834                                 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
 1835                                     PrimaryReferenceTag;
 1836                                 for (i = 0; i < 4; i++) {
 1837                                         *ref_tag_addr =
 1838                                             req->CDB.CDB32[lba_byte + i];
 1839                                         ref_tag_addr++;
 1840                                 }
 1841                                 req->CDB.EEDP32.PrimaryReferenceTag = 
 1842                                         htole32(req->CDB.EEDP32.PrimaryReferenceTag);
 1843                                 req->CDB.EEDP32.PrimaryApplicationTagMask =
 1844                                     0xFFFF;
 1845                                 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
 1846                                     0x20;
 1847                         } else {
 1848                                 eedp_flags |=
 1849                                     MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
 1850                                 req->EEDPFlags = htole16(eedp_flags);
 1851                                 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
 1852                                     0x1F) | 0x20;
 1853                         }
 1854                 }
 1855         }
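       /*
        * Worked example of the EEDP setup above (hypothetical values): for a
        * READ(10) to LBA 0x00012345 on an EEDP-formatted LUN, CDB bytes 2-5
        * (00 01 23 45) are copied into PrimaryReferenceTag and passed through
        * htole32() for the IOC, the application tag mask is set to 0xFFFF,
        * and bit 5 of CDB byte 1 is set (RDPROTECT/WRPROTECT = 001b) so the
        * device checks protection information on the transfer.
        */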
 1856 
 1857         cm->cm_length = csio->dxfer_len;
 1858         if (cm->cm_length != 0) {
 1859                 cm->cm_data = ccb;
 1860                 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
 1861         } else {
 1862                 cm->cm_data = NULL;
 1863         }
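       /*
        * The SGL lives inside the request frame: SGLOffset0 above points at
        * 32-bit word 24, so (32 - 24) * 4 = 32 bytes presumably remain in the
        * frame for inline SGL entries; anything larger spills into chain
        * frames.
        */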
 1864         cm->cm_sge = &req->SGL;
 1865         cm->cm_sglsize = (32 - 24) * 4;
 1866         cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
 1867         cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
 1868         cm->cm_complete = mpssas_scsiio_complete;
 1869         cm->cm_complete_data = ccb;
 1870         cm->cm_targ = targ;
 1871         cm->cm_lun = csio->ccb_h.target_lun;
 1872         cm->cm_ccb = ccb;
 1873 
 1874         /*
 1875          * If HBA is a WD and the command is not for a retry, try to build a
 1876          * direct I/O message. If failed, or the command is for a retry, send
 1877          * the I/O to the IR volume itself.
 1878          */
 1879         if (sc->WD_valid_config) {
 1880                 if (ccb->ccb_h.sim_priv.entries[0].field != MPS_WD_RETRY) {
 1881                         mpssas_direct_drive_io(sassc, cm, ccb);
 1882                 } else {
 1883                         mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
 1884                 }
 1885         }
 1886 
 1887 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
 1888         if (csio->bio != NULL)
 1889                 biotrack(csio->bio, __func__);
 1890 #endif
 1891         csio->ccb_h.qos.sim_data = sbinuptime();
 1892         callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
 1893             mpssas_scsiio_timeout, cm, 0);
 1894 
 1895         targ->issued++;
 1896         targ->outstanding++;
 1897         TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
 1898         ccb->ccb_h.status |= CAM_SIM_QUEUED;
 1899 
 1900         mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
 1901             __func__, cm, ccb, targ->outstanding);
 1902 
 1903         mps_map_command(sc, cm);
 1904         return;
 1905 }
 1906 
 1907 /**
 1908  * mps_sc_failed_io_info - translate a non-successful SCSI_IO request
 1909  */
 1910 static void
 1911 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
 1912     Mpi2SCSIIOReply_t *mpi_reply)
 1913 {
 1914         u32 response_info;
 1915         u8 *response_bytes;
 1916         u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
 1917             MPI2_IOCSTATUS_MASK;
 1918         u8 scsi_state = mpi_reply->SCSIState;
 1919         u8 scsi_status = mpi_reply->SCSIStatus;
 1920         u32 log_info = le32toh(mpi_reply->IOCLogInfo);
 1921         const char *desc_ioc_state, *desc_scsi_status;
 1922 
 1923         if (log_info == 0x31170000)
 1924                 return;
 1925 
 1926         desc_ioc_state = mps_describe_table(mps_iocstatus_string,
 1927             ioc_status);
 1928         desc_scsi_status = mps_describe_table(mps_scsi_status_string,
 1929             scsi_status);
 1930 
 1931         mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
 1932             le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
 1933 
 1934         /*
 1935          * We can add more detail about underflow data here
 1936          * TO-DO
 1937          */
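       /*
        * The kernel's %b conversion prints scsi_state numerically and then
        * names each set bit: the leading character of the bit string selects
        * the output radix and each subsequent "\<n>Name" pair labels bit n
        * (1-based).
        */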
 1938         mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
 1939             "scsi_state %b\n", desc_scsi_status, scsi_status,
 1940             scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
 1941             "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
 1942 
 1943         if (sc->mps_debug & MPS_XINFO &&
 1944                 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
 1945                 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
 1946                 scsi_sense_print(csio);
 1947                 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
 1948         }
 1949 
 1950         if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
 1951                 response_info = le32toh(mpi_reply->ResponseInfo);
 1952                 response_bytes = (u8 *)&response_info;
 1953                 mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
 1954                     response_bytes[0],
 1955                     mps_describe_table(mps_scsi_taskmgmt_string,
 1956                     response_bytes[0]));
 1957         }
 1958 }
 1959 
 1960 static void
 1961 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
 1962 {
 1963         MPI2_SCSI_IO_REPLY *rep;
 1964         union ccb *ccb;
 1965         struct ccb_scsiio *csio;
 1966         struct mpssas_softc *sassc;
 1967         struct scsi_vpd_supported_page_list *vpd_list = NULL;
 1968         u8 *TLR_bits, TLR_on;
 1969         int dir = 0, i;
 1970         u16 alloc_len;
 1971         struct mpssas_target *target;
 1972         target_id_t target_id;
 1973 
 1974         MPS_FUNCTRACE(sc);
 1975         mps_dprint(sc, MPS_TRACE,
 1976             "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
 1977             cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
 1978             cm->cm_targ->outstanding);
 1979 
 1980         callout_stop(&cm->cm_callout);
 1981         mtx_assert(&sc->mps_mtx, MA_OWNED);
 1982 
 1983         sassc = sc->sassc;
 1984         ccb = cm->cm_complete_data;
 1985         csio = &ccb->csio;
 1986         target_id = csio->ccb_h.target_id;
 1987         rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
 1988         /*
 1989          * XXX KDM if the chain allocation fails, does it matter if we do
 1990          * the sync and unload here?  It is simpler to do it in every case,
 1991          * assuming it doesn't cause problems.
 1992          */
 1993         if (cm->cm_data != NULL) {
 1994                 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
 1995                         dir = BUS_DMASYNC_POSTREAD;
 1996                 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
 1997                         dir = BUS_DMASYNC_POSTWRITE;
 1998                 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
 1999                 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
 2000         }
 2001 
 2002         cm->cm_targ->completed++;
 2003         cm->cm_targ->outstanding--;
 2004         TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
 2005         ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
 2006 
 2007 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
 2008         if (ccb->csio.bio != NULL)
 2009                 biotrack(ccb->csio.bio, __func__);
 2010 #endif
 2011 
 2012         if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
 2013                 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
 2014                 KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
 2015                     ("Not busy for CM_FLAGS_TIMEDOUT: %u\n", cm->cm_state));
 2016                 cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
 2017                 if (cm->cm_reply != NULL)
 2018                         mpssas_log_command(cm, MPS_RECOVERY,
 2019                             "completed timedout cm %p ccb %p during recovery "
 2020                             "ioc %x scsi %x state %x xfer %u\n",
 2021                             cm, cm->cm_ccb, le16toh(rep->IOCStatus),
 2022                             rep->SCSIStatus, rep->SCSIState,
 2023                             le32toh(rep->TransferCount));
 2024                 else
 2025                         mpssas_log_command(cm, MPS_RECOVERY,
 2026                             "completed timedout cm %p ccb %p during recovery\n",
 2027                             cm, cm->cm_ccb);
 2028         } else if (cm->cm_targ->tm != NULL) {
 2029                 if (cm->cm_reply != NULL)
 2030                         mpssas_log_command(cm, MPS_RECOVERY,
 2031                             "completed cm %p ccb %p during recovery "
 2032                             "ioc %x scsi %x state %x xfer %u\n",
 2033                             cm, cm->cm_ccb, le16toh(rep->IOCStatus),
 2034                             rep->SCSIStatus, rep->SCSIState,
 2035                             le32toh(rep->TransferCount));
 2036                 else
 2037                         mpssas_log_command(cm, MPS_RECOVERY,
 2038                             "completed cm %p ccb %p during recovery\n",
 2039                             cm, cm->cm_ccb);
 2040         } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
 2041                 mpssas_log_command(cm, MPS_RECOVERY,
 2042                     "reset completed cm %p ccb %p\n",
 2043                     cm, cm->cm_ccb);
 2044         }
 2045 
 2046         if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 2047                 /*
 2048                  * We ran into an error after we tried to map the command,
 2049                  * so we're getting a callback without queueing the command
 2050                  * to the hardware.  So we set the status here, and it will
 2051                  * be retained below.  We'll go through the "fast path",
 2052                  * because there can be no reply when we haven't actually
 2053                  * gone out to the hardware.
 2054                  */
 2055                 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
 2056 
 2057                 /*
 2058                  * Currently the only error included in the mask is
 2059                  * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
 2060                  * chain frames.  We need to freeze the queue until we get
 2061                  * a command that completed without this error, which will
 2062                  * hopefully have some chain frames attached that we can
 2063                  * use.  If we wanted to get smarter about it, we would
 2064                  * only unfreeze the queue in this condition when we're
 2065                  * sure that we're getting some chain frames back.  That's
 2066                  * probably unnecessary.
 2067                  */
 2068                 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
 2069                         xpt_freeze_simq(sassc->sim, 1);
 2070                         sassc->flags |= MPSSAS_QUEUE_FROZEN;
 2071                         mps_dprint(sc, MPS_XINFO, "Error sending command, "
 2072                                    "freezing SIM queue\n");
 2073                 }
 2074         }
 2075 
 2076         /*
 2077          * If this is a Start Stop Unit command and it was issued by the driver
 2078          * during shutdown, decrement the refcount to account for all of the
 2079          * commands that were sent.  All SSU commands should be completed before
 2080          * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
 2081          * is TRUE.
 2082          */
 2083         if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
 2084                 mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
 2085                 sc->SSU_refcount--;
 2086         }
 2087 
 2088         /* Take the fast path to completion */
 2089         if (cm->cm_reply == NULL) {
 2090                 if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
 2091                         if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
 2092                                 mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
 2093                         else {
 2094                                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 2095                                 ccb->csio.scsi_status = SCSI_STATUS_OK;
 2096                         }
 2097                         if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
 2098                                 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 2099                                 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
 2100                                 mps_dprint(sc, MPS_XINFO,
 2101                                     "Unfreezing SIM queue\n");
 2102                         }
 2103                 } 
 2104 
 2105                 /*
 2106                  * There are two scenarios where the status won't be
 2107                  * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
 2108                  * set, the second is in the MPS_FLAGS_DIAGRESET above.
 2109                  */
 2110                 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
 2111                         /*
 2112                          * Freeze the dev queue so that commands are
 2113                          * executed in the correct order after error
 2114                          * recovery.
 2115                          */
 2116                         ccb->ccb_h.status |= CAM_DEV_QFRZN;
 2117                         xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
 2118                 }
 2119                 mps_free_command(sc, cm);
 2120                 xpt_done(ccb);
 2121                 return;
 2122         }
 2123 
 2124         mpssas_log_command(cm, MPS_XINFO,
 2125             "ioc %x scsi %x state %x xfer %u\n",
 2126             le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
 2127             le32toh(rep->TransferCount));
 2128 
 2129         /*
 2130          * If this is a Direct Drive I/O, reissue the I/O to the original IR
 2131          * Volume if an error occurred (normal I/O retry).  Use the original
 2132          * CCB, but set a flag that this will be a retry so that it's sent to
 2133          * the original volume.  Free the command but reuse the CCB.
 2134          */
 2135         if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
 2136                 mps_free_command(sc, cm);
 2137                 ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
 2138                 mpssas_action_scsiio(sassc, ccb);
 2139                 return;
 2140         } else
 2141                 ccb->ccb_h.sim_priv.entries[0].field = 0;
 2142 
 2143         switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
 2144         case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
 2145                 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
 2146                 /* FALLTHROUGH */
 2147         case MPI2_IOCSTATUS_SUCCESS:
 2148         case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
 2149 
 2150                 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
 2151                     MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
 2152                         mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
 2153 
 2154                 /* Completion failed at the transport level. */
 2155                 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
 2156                     MPI2_SCSI_STATE_TERMINATED)) {
 2157                         mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2158                         break;
 2159                 }
 2160 
 2161                 /* In a modern packetized environment, an autosense failure
 2162                  * implies that there's not much else that can be done to
 2163                  * recover the command.
 2164                  */
 2165                 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
 2166                         mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
 2167                         break;
 2168                 }
 2169 
 2170                 /*
 2171                  * CAM doesn't care about SAS Response Info data, but if this is
 2172                  * the state check if TLR should be done.  If not, clear the
 2173                  * TLR_bits for the target.
 2174                  */
 2175                 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
 2176                     ((le32toh(rep->ResponseInfo) &
 2177                     MPI2_SCSI_RI_MASK_REASONCODE) ==
 2178                     MPS_SCSI_RI_INVALID_FRAME)) {
 2179                         sc->mapping_table[target_id].TLR_bits =
 2180                             (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
 2181                 }
 2182 
 2183                 /*
 2184                  * Intentionally override the normal SCSI status reporting
 2185                  * for these two cases.  These are likely to happen in a
 2186                  * multi-initiator environment, and we want to make sure that
 2187                  * CAM retries these commands rather than fail them.
 2188                  */
 2189                 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
 2190                     (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
 2191                         mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
 2192                         break;
 2193                 }
 2194 
 2195                 /* Handle normal status and sense */
 2196                 csio->scsi_status = rep->SCSIStatus;
 2197                 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
 2198                         mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 2199                 else
 2200                         mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
 2201 
 2202                 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
 2203                         int sense_len, returned_sense_len;
 2204 
 2205                         returned_sense_len = min(le32toh(rep->SenseCount),
 2206                             sizeof(struct scsi_sense_data));
 2207                         if (returned_sense_len < ccb->csio.sense_len)
 2208                                 ccb->csio.sense_resid = ccb->csio.sense_len -
 2209                                         returned_sense_len;
 2210                         else
 2211                                 ccb->csio.sense_resid = 0;
 2212 
 2213                         sense_len = min(returned_sense_len,
 2214                             ccb->csio.sense_len - ccb->csio.sense_resid);
 2215                         bzero(&ccb->csio.sense_data,
 2216                               sizeof(ccb->csio.sense_data));
 2217                         bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
 2218                         ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
 2219                 }
 2220 
 2221                 /*
 2222                  * Check if this is an INQUIRY command.  If it's a VPD inquiry,
 2223                  * and it's page code 0 (Supported Page List), and there is
 2224                  * inquiry data, and this is for a sequential access device, and
 2225                  * the device is an SSP target, and TLR is supported by the
 2226                  * controller, turn the TLR_bits value ON if page 0x90 is
 2227                  * supported.
 2228                  */
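               /*
                * VPD page 0x90 is the protocol-specific logical unit page; its
                * presence in the supported-page list is taken as the sign that
                * the SSP target can honor Transport Layer Retries.  The
                * allocation length comes from INQUIRY CDB bytes 3-4
                * (big-endian), trimmed by the residual so only bytes actually
                * returned are scanned.
                */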
 2229                 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
 2230                     (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
 2231                     (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
 2232                     ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
 2233                     (csio->data_ptr != NULL) &&
 2234                     ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
 2235                     (sc->control_TLR) &&
 2236                     (sc->mapping_table[target_id].device_info &
 2237                     MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
 2238                         vpd_list = (struct scsi_vpd_supported_page_list *)
 2239                             csio->data_ptr;
 2240                         TLR_bits = &sc->mapping_table[target_id].TLR_bits;
 2241                         *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
 2242                         TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
 2243                         alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
 2244                             csio->cdb_io.cdb_bytes[4];
 2245                         alloc_len -= csio->resid;
 2246                         for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
 2247                                 if (vpd_list->list[i] == 0x90) {
 2248                                         *TLR_bits = TLR_on;
 2249                                         break;
 2250                                 }
 2251                         }
 2252                 }
 2253 
 2254                 /*
 2255                  * If this is a SATA direct-access end device, mark it so that
 2256                  * a SCSI StartStopUnit command will be sent to it when the
 2257                  * driver is being shutdown.
 2258                  */
 2259                 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
 2260                     ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
 2261                     (sc->mapping_table[target_id].device_info &
 2262                     MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
 2263                     ((sc->mapping_table[target_id].device_info &
 2264                     MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
 2265                     MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
 2266                         target = &sassc->targets[target_id];
 2267                         target->supports_SSU = TRUE;
 2268                         mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
 2269                             target_id);
 2270                 }
 2271                 break;
 2272         case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
 2273         case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
 2274                 /*
 2275                  * If devinfo is 0 this will be a volume.  In that case don't
 2276                  * tell CAM that the volume is not there.  We want volumes to
 2277                  * be enumerated until they are deleted/removed, not just
 2278                  * failed.
 2279                  */
 2280                 if (cm->cm_targ->devinfo == 0)
 2281                         mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 2282                 else
 2283                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 2284                 break;
 2285         case MPI2_IOCSTATUS_INVALID_SGL:
 2286                 mps_print_scsiio_cmd(sc, cm);
 2287                 mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
 2288                 break;
 2289         case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
 2290                 /*
 2291                  * This is one of the responses that comes back when an I/O
 2292                  * has been aborted.  If it is because of a timeout that we
 2293                  * initiated, just set the status to CAM_CMD_TIMEOUT.
 2294                  * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
 2295                  * command is the same (it gets retried, subject to the
 2296                  * retry counter), the only difference is what gets printed
 2297                  * on the console.
 2298                  */
 2299                 if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
 2300                         mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
 2301                 else
 2302                         mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
 2303                 break;
 2304         case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
 2305                 /* resid is ignored for this condition */
 2306                 csio->resid = 0;
 2307                 mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
 2308                 break;
 2309         case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
 2310         case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
 2311                 /*
 2312                  * These can sometimes be transient transport-related
 2313                  * errors, and sometimes persistent drive-related errors.
 2314                  * We used to retry these without decrementing the retry
 2315                  * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
 2316                  * we hit a persistent drive problem that returns one of
 2317                  * these error codes, we would retry indefinitely.  So,
 2318                  * return CAM_REQ_CMP_ERR so that we decrement the retry
 2319                  * count and avoid infinite retries.  We're taking the
 2320                  * potential risk of flagging false failures in the event
 2321                  * of a topology-related error (e.g. a SAS expander problem
 2322                  * causes a command addressed to a drive to fail), but
 2323                  * avoiding getting into an infinite retry loop. However,
 2324                  * if we get them while we're moving a device, we should
 2325                  * fail the request as 'not there' because the device
 2326                  * is effectively gone.
 2327                  */
 2328                 if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
 2329                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 2330                 else
 2331                         mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2332                 mps_dprint(sc, MPS_INFO,
 2333                     "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
 2334                     mps_describe_table(mps_iocstatus_string,
 2335                     le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
 2336                     target_id, cm->cm_desc.Default.SMID,
 2337                     le32toh(rep->IOCLogInfo),
 2338                     (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
 2339                 mps_dprint(sc, MPS_XINFO,
 2340                     "SCSIStatus %x SCSIState %x xfercount %u\n",
 2341                     rep->SCSIStatus, rep->SCSIState,
 2342                     le32toh(rep->TransferCount));
 2343                 break;
 2344         case MPI2_IOCSTATUS_INVALID_FUNCTION:
 2345         case MPI2_IOCSTATUS_INTERNAL_ERROR:
 2346         case MPI2_IOCSTATUS_INVALID_VPID:
 2347         case MPI2_IOCSTATUS_INVALID_FIELD:
 2348         case MPI2_IOCSTATUS_INVALID_STATE:
 2349         case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
 2350         case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
 2351         case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
 2352         case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
 2353         case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
 2354         default:
 2355                 mpssas_log_command(cm, MPS_XINFO,
 2356                     "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
 2357                     le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
 2358                     rep->SCSIStatus, rep->SCSIState,
 2359                     le32toh(rep->TransferCount));
 2360                 csio->resid = cm->cm_length;
 2361                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2362                 break;
 2363         }
 2364 
 2365         mps_sc_failed_io_info(sc, csio, rep);
 2366 
 2367         if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
 2368                 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 2369                 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
 2370                 mps_dprint(sc, MPS_XINFO, "Command completed, "
 2371                     "unfreezing SIM queue\n");
 2372         }
 2373 
 2374         if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
 2375                 ccb->ccb_h.status |= CAM_DEV_QFRZN;
 2376                 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
 2377         }
 2378 
 2379         /*
 2380          * Check to see if we're removing the device. If so, and this is the
 2381          * last command on the queue, proceed with the deferred removal of the
 2382          * device.  Note, for removing a volume, this won't trigger because
 2383          * pending_remove_tm will be NULL.
 2384          */
 2385         if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
 2386                 if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
 2387                     cm->cm_targ->pending_remove_tm != NULL) {
 2388                         mps_dprint(sc, MPS_INFO,
 2389                             "Last pending command complete: starting remove_device target %u handle 0x%04x\n",
 2390                             cm->cm_targ->tid, cm->cm_targ->handle);
 2391                         mps_map_command(sc, cm->cm_targ->pending_remove_tm);
 2392                         cm->cm_targ->pending_remove_tm = NULL;
 2393                 }
 2394         }
 2395 
 2396         mps_free_command(sc, cm);
 2397         xpt_done(ccb);
 2398 }
 2399 
 2400 /* All Request reached here are Endian safe */
 2401 static void
 2402 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
 2403     union ccb *ccb) {
 2404         pMpi2SCSIIORequest_t    pIO_req;
 2405         struct mps_softc        *sc = sassc->sc;
 2406         uint64_t                virtLBA;
 2407         uint32_t                physLBA, stripe_offset, stripe_unit;
 2408         uint32_t                io_size, column;
 2409         uint8_t                 *ptrLBA, lba_idx, physLBA_byte, *CDB;
 2410 
 2411         /*
 2412          * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
 2413          * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
 2414          * will be sent to the IR volume itself.  Since Read6 and Write6 are a
 2415          * bit different than the 10/16 CDBs, handle them separately.
 2416          */
 2417         pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
 2418         CDB = pIO_req->CDB.CDB32;
 2419 
 2420         /*
 2421          * Handle 6 byte CDBs.
 2422          */
 2423         if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
 2424             (CDB[0] == WRITE_6))) {
 2425                 /*
 2426                  * Get the transfer size in blocks.
 2427                  */
 2428                 io_size = (cm->cm_length >> sc->DD_block_exponent);
 2429 
 2430                 /*
 2431                  * Get virtual LBA given in the CDB.
 2432                  */
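               /*
                * READ(6)/WRITE(6) carry a 21-bit LBA: the low 5 bits of CDB
                * byte 1 are the most significant bits, followed by bytes 2
                * and 3, which is what the masking and shifts below rebuild.
                */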
 2433                 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
 2434                     ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
 2435 
 2436                 /*
 2437                  * Check that LBA range for I/O does not exceed volume's
 2438                  * MaxLBA.
 2439                  */
 2440                 if ((virtLBA + (uint64_t)io_size - 1) <=
 2441                     sc->DD_max_lba) {
 2442                         /*
 2443                          * Check if the I/O crosses a stripe boundary.  If not,
 2444                          * translate the virtual LBA to a physical LBA and set
 2445                          * the DevHandle for the PhysDisk to be used.  If it
 2446                          * does cross a boundary, do normal I/O.  To get the
 2447                          * right DevHandle to use, get the map number for the
 2448                          * column, then use that map number to look up the
 2449                          * DevHandle of the PhysDisk.
 2450                          */
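                       /*
                        * Worked example (hypothetical geometry): with a
                        * 64-block stripe (DD_stripe_exponent = 6) across two
                        * physical disks, virtLBA 200 gives stripe_offset
                        * 200 & 63 = 8, stripe number 200 >> 6 = 3, stripe_unit
                        * 3 / 2 = 1, column 3 % 2 = 1, and a physical LBA of
                        * (1 << 6) + 8 = 72 on the disk mapped to column 1.
                        */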
 2451                         stripe_offset = (uint32_t)virtLBA &
 2452                             (sc->DD_stripe_size - 1);
 2453                         if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
 2454                                 physLBA = (uint32_t)virtLBA >>
 2455                                     sc->DD_stripe_exponent;
 2456                                 stripe_unit = physLBA / sc->DD_num_phys_disks;
 2457                                 column = physLBA % sc->DD_num_phys_disks;
 2458                                 pIO_req->DevHandle =
 2459                                     htole16(sc->DD_column_map[column].dev_handle);
 2460                                 /* ???? Is this endian safe? */
 2461                                 cm->cm_desc.SCSIIO.DevHandle =
 2462                                     pIO_req->DevHandle;
 2463 
 2464                                 physLBA = (stripe_unit <<
 2465                                     sc->DD_stripe_exponent) + stripe_offset;
 2466                                 ptrLBA = &pIO_req->CDB.CDB32[1];
 2467                                 physLBA_byte = (uint8_t)(physLBA >> 16);
 2468                                 *ptrLBA = physLBA_byte;
 2469                                 ptrLBA = &pIO_req->CDB.CDB32[2];
 2470                                 physLBA_byte = (uint8_t)(physLBA >> 8);
 2471                                 *ptrLBA = physLBA_byte;
 2472                                 ptrLBA = &pIO_req->CDB.CDB32[3];
 2473                                 physLBA_byte = (uint8_t)physLBA;
 2474                                 *ptrLBA = physLBA_byte;
 2475 
 2476                                 /*
 2477                                  * Set flag that Direct Drive I/O is
 2478                                  * being done.
 2479                                  */
 2480                                 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
 2481                         }
 2482                 }
 2483                 return;
 2484         }
 2485 
 2486         /*
 2487          * Handle 10-, 12-, or 16-byte CDBs.
 2488          */
 2489         if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
 2490             (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
 2491             (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
 2492             (CDB[0] == WRITE_12))) {
 2493                 /*
 2494                  * For 16-byte CDBs, verify that the upper 4 bytes of the LBA
 2495                  * (CDB bytes 2-5) are 0.  If not, the access is beyond 2TB, so
 2496                  * handle it in the else section.  10-byte and 12-byte CDBs are OK.
 2497                  * FreeBSD only rarely issues 12-byte READ/WRITE commands, but the
 2498                  * driver is ready to accept 12-byte CDBs for direct I/O.
 2499                  */
 2500                 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
 2501                     (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
 2502                     !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
 2503                         /*
 2504                          * Get the transfer size in blocks.
 2505                          */
 2506                         io_size = (cm->cm_length >> sc->DD_block_exponent);
 2507 
 2508                         /*
 2509                          * Get virtual LBA.  Point to correct lower 4 bytes of
 2510                          * LBA in the CDB depending on command.
 2511                          */
 2512                         lba_idx = ((CDB[0] == READ_12) || 
 2513                                 (CDB[0] == WRITE_12) ||
 2514                                 (CDB[0] == READ_10) ||
 2515                                 (CDB[0] == WRITE_10))? 2 : 6;
 2516                         virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
 2517                             ((uint64_t)CDB[lba_idx + 1] << 16) |
 2518                             ((uint64_t)CDB[lba_idx + 2] << 8) |
 2519                             (uint64_t)CDB[lba_idx + 3];
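                              /*
                               * CDB layout note: READ/WRITE(10) and (12) carry
                               * a 4-byte LBA in CDB bytes 2-5, while
                               * READ/WRITE(16) carry an 8-byte LBA in bytes
                               * 2-9.  Because the upper 4 bytes were verified
                               * to be zero for the 16-byte case, reading 4
                               * bytes at lba_idx (2 or 6) yields the full LBA
                               * either way.
                               */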
 2520 
 2521                         /*
 2522                          * Check that LBA range for I/O does not exceed volume's
 2523                          * MaxLBA.
 2524                          */
 2525                         if ((virtLBA + (uint64_t)io_size - 1) <=
 2526                             sc->DD_max_lba) {
 2527                                 /*
 2528                                  * Check if the I/O crosses a stripe boundary.
 2529                                  * If not, translate the virtual LBA to a
 2530                                  * physical LBA and set the DevHandle for the
 2531                                  * PhysDisk to be used.  If it does cross a
 2532                                  * boundary, do normal I/O.  To get the right
 2533                                  * DevHandle to use, get the map number for the
 2534                                  * column, then use that map number to look up
 2535                                  * the DevHandle of the PhysDisk.
 2536                                  */
 2537                                 stripe_offset = (uint32_t)virtLBA &
 2538                                     (sc->DD_stripe_size - 1);
 2539                                 if ((stripe_offset + io_size) <=
 2540                                     sc->DD_stripe_size) {
 2541                                         physLBA = (uint32_t)virtLBA >>
 2542                                             sc->DD_stripe_exponent;
 2543                                         stripe_unit = physLBA /
 2544                                             sc->DD_num_phys_disks;
 2545                                         column = physLBA %
 2546                                             sc->DD_num_phys_disks;
 2547                                         pIO_req->DevHandle =
 2548                                             htole16(sc->DD_column_map[column].
 2549                                             dev_handle);
 2550                                         cm->cm_desc.SCSIIO.DevHandle =
 2551                                             pIO_req->DevHandle;
 2552 
 2553                                         physLBA = (stripe_unit <<
 2554                                             sc->DD_stripe_exponent) +
 2555                                             stripe_offset;
 2556                                         ptrLBA =
 2557                                             &pIO_req->CDB.CDB32[lba_idx];
 2558                                         physLBA_byte = (uint8_t)(physLBA >> 24);
 2559                                         *ptrLBA = physLBA_byte;
 2560                                         ptrLBA =
 2561                                             &pIO_req->CDB.CDB32[lba_idx + 1];
 2562                                         physLBA_byte = (uint8_t)(physLBA >> 16);
 2563                                         *ptrLBA = physLBA_byte;
 2564                                         ptrLBA =
 2565                                             &pIO_req->CDB.CDB32[lba_idx + 2];
 2566                                         physLBA_byte = (uint8_t)(physLBA >> 8);
 2567                                         *ptrLBA = physLBA_byte;
 2568                                         ptrLBA =
 2569                                             &pIO_req->CDB.CDB32[lba_idx + 3];
 2570                                         physLBA_byte = (uint8_t)physLBA;
 2571                                         *ptrLBA = physLBA_byte;
 2572 
 2573                                         /*
 2574                                          * Set flag that Direct Drive I/O is
 2575                                          * being done.
 2576                                          */
 2577                                         cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
 2578                                 }
 2579                         }
 2580                 } else {
 2581                         /*
 2582                          * 16-byte CDB and the upper 4 bytes of the CDB are not
 2583                          * 0.  Get the transfer size in blocks.
 2584                          */
 2585                         io_size = (cm->cm_length >> sc->DD_block_exponent);
 2586 
 2587                         /*
 2588                          * Get virtual LBA.
 2589                          */
 2590                         virtLBA = ((uint64_t)CDB[2] << 56) |
 2591                             ((uint64_t)CDB[3] << 48) |
 2592                             ((uint64_t)CDB[4] << 40) |
 2593                             ((uint64_t)CDB[5] << 32) |
 2594                             ((uint64_t)CDB[6] << 24) |
 2595                             ((uint64_t)CDB[7] << 16) |
 2596                             ((uint64_t)CDB[8] << 8) |
 2597                             (uint64_t)CDB[9]; 
 2598 
 2599                         /*
 2600                          * Check that LBA range for I/O does not exceed volume's
 2601                          * MaxLBA.
 2602                          */
 2603                         if ((virtLBA + (uint64_t)io_size - 1) <=
 2604                             sc->DD_max_lba) {
 2605                                 /*
 2606                                  * Check if the I/O crosses a stripe boundary.
 2607                                  * If not, translate the virtual LBA to a
 2608                                  * physical LBA and set the DevHandle for the
 2609                                  * PhysDisk to be used.  If it does cross a
 2610                                  * boundary, do normal I/O.  To get the right
 2611                                  * DevHandle to use, get the map number for the
 2612                                  * column, then use that map number to look up
 2613                                  * the DevHandle of the PhysDisk.
 2614                                  */
 2615                                 stripe_offset = (uint32_t)virtLBA &
 2616                                     (sc->DD_stripe_size - 1);
 2617                                 if ((stripe_offset + io_size) <=
 2618                                     sc->DD_stripe_size) {
 2619                                         physLBA = (uint32_t)(virtLBA >>
 2620                                             sc->DD_stripe_exponent);
 2621                                         stripe_unit = physLBA /
 2622                                             sc->DD_num_phys_disks;
 2623                                         column = physLBA %
 2624                                             sc->DD_num_phys_disks;
 2625                                         pIO_req->DevHandle =
 2626                                             htole16(sc->DD_column_map[column].
 2627                                             dev_handle);
 2628                                         cm->cm_desc.SCSIIO.DevHandle =
 2629                                             pIO_req->DevHandle;
 2630 
 2631                                         physLBA = (stripe_unit <<
 2632                                             sc->DD_stripe_exponent) +
 2633                                             stripe_offset;
 2634 
 2635                                         /*
 2636                                          * Set upper 4 bytes of LBA to 0.  We
 2637                                          * assume that the phys disks are less
 2638                                          * than 2 TB in size.  Then, set the
 2639                                          * lower 4 bytes.
 2640                                          */
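                                              /*
                                               * That assumption is also
                                               * enforced by the arithmetic
                                               * above: physLBA is a uint32_t,
                                               * so the translated LBA cannot
                                               * exceed 32 bits here.
                                               */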
 2641                                         pIO_req->CDB.CDB32[2] = 0;
 2642                                         pIO_req->CDB.CDB32[3] = 0;
 2643                                         pIO_req->CDB.CDB32[4] = 0;
 2644                                         pIO_req->CDB.CDB32[5] = 0;
 2645                                         ptrLBA = &pIO_req->CDB.CDB32[6];
 2646                                         physLBA_byte = (uint8_t)(physLBA >> 24);
 2647                                         *ptrLBA = physLBA_byte;
 2648                                         ptrLBA = &pIO_req->CDB.CDB32[7];
 2649                                         physLBA_byte = (uint8_t)(physLBA >> 16);
 2650                                         *ptrLBA = physLBA_byte;
 2651                                         ptrLBA = &pIO_req->CDB.CDB32[8];
 2652                                         physLBA_byte = (uint8_t)(physLBA >> 8);
 2653                                         *ptrLBA = physLBA_byte;
 2654                                         ptrLBA = &pIO_req->CDB.CDB32[9];
 2655                                         physLBA_byte = (uint8_t)physLBA;
 2656                                         *ptrLBA = physLBA_byte;
 2657 
 2658                                         /*
 2659                                          * Set flag that Direct Drive I/O is
 2660                                          * being done.
 2661                                          */
 2662                                         cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
 2663                                 }
 2664                         }
 2665                 }
 2666         }
 2667 }
 2668 
 2669 static void
 2670 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
 2671 {
 2672         MPI2_SMP_PASSTHROUGH_REPLY *rpl;
 2673         MPI2_SMP_PASSTHROUGH_REQUEST *req;
 2674         uint64_t sasaddr;
 2675         union ccb *ccb;
 2676 
 2677         ccb = cm->cm_complete_data;
 2678 
 2679         /*
 2680          * Currently there should be no way we can hit this case.  It only
 2681          * happens when we have a failure to allocate chain frames, and SMP
 2682          * commands require only two S/G elements, which fit within the
 2683          * standard request frame, so no chain frames are needed.
 2684          */
 2685         if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 2686                 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
 2687                            __func__, cm->cm_flags);
 2688                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2689                 goto bailout;
 2690         }
 2691 
 2692         rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
 2693         if (rpl == NULL) {
 2694                 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
 2695                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2696                 goto bailout;
 2697         }
 2698 
 2699         req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
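              /*
               * The request's SASAddress was stored little-endian when the
               * command was built, so convert both halves back to host order
               * before using the address for logging.
               */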
 2700         sasaddr = le32toh(req->SASAddress.Low);
 2701         sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
 2702 
 2703         if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
 2704             MPI2_IOCSTATUS_SUCCESS ||
 2705             rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
 2706                 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
 2707                     __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
 2708                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2709                 goto bailout;
 2710         }
 2711 
 2712         mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
 2713                    "%#jx completed successfully\n", __func__,
 2714                    (uintmax_t)sasaddr);
 2715 
 2716         if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
 2717                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 2718         else
 2719                 mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
 2720 
 2721 bailout:
 2722         /*
 2723          * We sync in both directions because we had DMAs in the S/G list
 2724          * in both directions.
 2725          */
 2726         bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
 2727                         BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2728         bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
 2729         mps_free_command(sc, cm);
 2730         xpt_done(ccb);
 2731 }
 2732 
 2733 static void
 2734 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
 2735 {
 2736         struct mps_command *cm;
 2737         uint8_t *request, *response;
 2738         MPI2_SMP_PASSTHROUGH_REQUEST *req;
 2739         struct mps_softc *sc;
 2740         int error;
 2741 
 2742         sc = sassc->sc;
 2743         error = 0;
 2744 
 2745         /*
 2746          * XXX We don't yet support physical addresses here.
 2747          */
 2748         switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
 2749         case CAM_DATA_PADDR:
 2750         case CAM_DATA_SG_PADDR:
 2751                 mps_dprint(sc, MPS_ERROR,
 2752                            "%s: physical addresses not supported\n", __func__);
 2753                 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
 2754                 xpt_done(ccb);
 2755                 return;
 2756         case CAM_DATA_SG:
 2757                 /*
 2758                  * The chip does not support more than one buffer for the
 2759                  * request or response.
 2760                  */
 2761                 if ((ccb->smpio.smp_request_sglist_cnt > 1)
 2762                   || (ccb->smpio.smp_response_sglist_cnt > 1)) {
 2763                         mps_dprint(sc, MPS_ERROR,
 2764                                    "%s: multiple request or response "
 2765                                    "buffer segments not supported for SMP\n",
 2766                                    __func__);
 2767                         mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
 2768                         xpt_done(ccb);
 2769                         return;
 2770                 }
 2771 
 2772                 /*
 2773                  * The CAM_SCATTER_VALID flag was originally implemented
 2774                  * for the XPT_SCSI_IO CCB, which only has one data pointer.
 2775                  * We have two.  So, just take that flag to mean that we
 2776                  * might have S/G lists, and look at the S/G segment count
 2777                  * to figure out whether that is the case for each individual
 2778                  * buffer.
 2779                  */
 2780                 if (ccb->smpio.smp_request_sglist_cnt != 0) {
 2781                         bus_dma_segment_t *req_sg;
 2782 
 2783                         req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
 2784                         request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
 2785                 } else
 2786                         request = ccb->smpio.smp_request;
 2787 
 2788                 if (ccb->smpio.smp_response_sglist_cnt != 0) {
 2789                         bus_dma_segment_t *rsp_sg;
 2790 
 2791                         rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
 2792                         response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
 2793                 } else
 2794                         response = ccb->smpio.smp_response;
 2795                 break;
 2796         case CAM_DATA_VADDR:
 2797                 request = ccb->smpio.smp_request;
 2798                 response = ccb->smpio.smp_response;
 2799                 break;
 2800         default:
 2801                 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
 2802                 xpt_done(ccb);
 2803                 return;
 2804         }
 2805 
 2806         cm = mps_alloc_command(sc);
 2807         if (cm == NULL) {
 2808                 mps_dprint(sc, MPS_ERROR,
 2809                     "%s: cannot allocate command\n", __func__);
 2810                 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
 2811                 xpt_done(ccb);
 2812                 return;
 2813         }
 2814 
 2815         req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
 2816         bzero(req, sizeof(*req));
 2817         req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
 2818 
 2819         /* Allow the chip to use any route to this SAS address. */
 2820         req->PhysicalPort = 0xff;
 2821 
 2822         req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
 2823         req->SGLFlags = 
 2824             MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
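              /*
               * SGLFlags note: the S/G list for this pass-through is built
               * from host (system) addresses and uses the MPI-format SGE
               * layout rather than the IEEE SGL format.
               */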
 2825 
 2826         mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
 2827             "address %#jx\n", __func__, (uintmax_t)sasaddr);
 2828 
 2829         mpi_init_sge(cm, req, &req->SGL);
 2830 
 2831         /*
 2832          * Set up a uio to pass into mps_map_command().  This allows us to
 2833          * do one map command, and one busdma call in there.
 2834          */
 2835         cm->cm_uio.uio_iov = cm->cm_iovec;
 2836         cm->cm_uio.uio_iovcnt = 2;
 2837         cm->cm_uio.uio_segflg = UIO_SYSSPACE;
 2838 
 2839         /*
 2840          * The read/write flag isn't used by busdma, but set it just in
 2841          * case.  This isn't exactly accurate, either, since we're going in
 2842          * both directions.
 2843          */
 2844         cm->cm_uio.uio_rw = UIO_WRITE;
 2845 
 2846         cm->cm_iovec[0].iov_base = request;
 2847         cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
 2848         cm->cm_iovec[1].iov_base = response;
 2849         cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
 2850 
 2851         cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
 2852                                cm->cm_iovec[1].iov_len;
 2853 
 2854         /*
 2855          * Trigger a warning message in mps_data_cb() for the user if we
 2856          * wind up exceeding two S/G segments.  The chip expects one
 2857          * segment for the request and another for the response.
 2858          */
 2859         cm->cm_max_segs = 2;
 2860 
 2861         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 2862         cm->cm_complete = mpssas_smpio_complete;
 2863         cm->cm_complete_data = ccb;
 2864 
 2865         /*
 2866          * Tell the mapping code that we're using a uio, and that this is
 2867          * an SMP passthrough request.  There is a little special-case
 2868          * logic there (in mps_data_cb()) to handle the bidirectional
 2869          * transfer.  
 2870          */
 2871         cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
 2872                         MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
 2873 
 2874         /* The chip data format is little endian. */
 2875         req->SASAddress.High = htole32(sasaddr >> 32);
 2876         req->SASAddress.Low = htole32(sasaddr);
 2877 
 2878         /*
 2879          * XXX Note that we don't have a timeout/abort mechanism here.
 2880          * From the manual, it looks like task management requests only
 2881          * work for SCSI IO and SATA passthrough requests.  We may need to
 2882          * have a mechanism to retry requests in the event of a chip reset
 2883          * at least.  Hopefully the chip will ensure that any errors short
 2884          * of that are relayed back to the driver.
 2885          */
 2886         error = mps_map_command(sc, cm);
 2887         if ((error != 0) && (error != EINPROGRESS)) {
 2888                 mps_dprint(sc, MPS_ERROR,
 2889                            "%s: error %d returned from mps_map_command()\n",
 2890                            __func__, error);
 2891                 goto bailout_error;
 2892         }
 2893 
 2894         return;
 2895 
 2896 bailout_error:
 2897         mps_free_command(sc, cm);
 2898         mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
 2899         xpt_done(ccb);
 2900         return;
 2901 
 2902 }
 2903 
 2904 static void
 2905 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
 2906 {
 2907         struct mps_softc *sc;
 2908         struct mpssas_target *targ;
 2909         uint64_t sasaddr = 0;
 2910 
 2911         sc = sassc->sc;
 2912 
 2913         /*
 2914          * Make sure the target exists.
 2915          */
 2916         KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
 2917             ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
 2918         targ = &sassc->targets[ccb->ccb_h.target_id];
 2919         if (targ->handle == 0x0) {
 2920                 mps_dprint(sc, MPS_ERROR,
 2921                            "%s: target %d does not exist!\n", __func__,
 2922                            ccb->ccb_h.target_id);
 2923                 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
 2924                 xpt_done(ccb);
 2925                 return;
 2926         }
 2927 
 2928         /*
 2929          * If this device has an embedded SMP target, we'll talk to it
 2930          * directly.  Otherwise, we need to figure out the expander's
 2931          * SAS address below.
 2932          */
 2933         if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
 2934                 sasaddr = targ->sasaddr;
 2935 
 2936         /*
 2937          * If we don't have a SAS address for the expander yet, try
 2938          * grabbing it from the page 0x83 information cached in the
 2939          * transport layer for this target.  LSI expanders report the
 2940          * expander SAS address as the port-associated SAS address in
 2941          * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
 2942          * 0x83.
 2943          *
 2944          * XXX KDM disable this for now, but leave it commented out so that
 2945          * it is obvious that this is another possible way to get the SAS
 2946          * address.
 2947          *
 2948          * The parent handle method below is a little more reliable, and
 2949          * the other benefit is that it works for devices other than SES
 2950          * devices.  So you can send an SMP request to a da(4) device and it
 2951          * will get routed to the expander that device is attached to.
 2952          * (Assuming the da(4) device doesn't contain an SMP target...)
 2953          */
 2954 #if 0
 2955         if (sasaddr == 0)
 2956                 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
 2957 #endif
 2958 
 2959         /*
 2960          * If we still don't have a SAS address for the expander, look for
 2961          * the parent device of this device, which is probably the expander.
 2962          */
 2963         if (sasaddr == 0) {
 2964 #ifdef OLD_MPS_PROBE
 2965                 struct mpssas_target *parent_target;
 2966 #endif
 2967 
 2968                 if (targ->parent_handle == 0x0) {
 2969                         mps_dprint(sc, MPS_ERROR,
 2970                                    "%s: handle %d does not have a valid "
 2971                                    "parent handle!\n", __func__, targ->handle);
 2972                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 2973                         goto bailout;
 2974                 }
 2975 #ifdef OLD_MPS_PROBE
 2976                 parent_target = mpssas_find_target_by_handle(sassc, 0,
 2977                         targ->parent_handle);
 2978 
 2979                 if (parent_target == NULL) {
 2980                         mps_dprint(sc, MPS_ERROR,
 2981                                    "%s: handle %d does not have a valid "
 2982                                    "parent target!\n", __func__, targ->handle);
 2983                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 2984                         goto bailout;
 2985                 }
 2986 
 2987                 if ((parent_target->devinfo &
 2988                      MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
 2989                         mps_dprint(sc, MPS_ERROR,
 2990                                    "%s: handle %d parent %d does not "
 2991                                    "have an SMP target!\n", __func__,
 2992                                    targ->handle, parent_target->handle);
 2993                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 2994                         goto bailout;
 2995                 }
 2996 
 2997                 sasaddr = parent_target->sasaddr;
 2998 #else /* OLD_MPS_PROBE */
 2999                 if ((targ->parent_devinfo &
 3000                      MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
 3001                         mps_dprint(sc, MPS_ERROR,
 3002                                    "%s: handle %d parent %d does not "
 3003                                    "have an SMP target!\n", __func__,
 3004                                    targ->handle, targ->parent_handle);
 3005                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 3006                         goto bailout;
 3007                 }
 3008                 if (targ->parent_sasaddr == 0x0) {
 3009                         mps_dprint(sc, MPS_ERROR,
 3010                                    "%s: handle %d parent handle %d does "
 3011                                    "not have a valid SAS address!\n",
 3012                                    __func__, targ->handle, targ->parent_handle);
 3013                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 3014                         goto bailout;
 3015                 }
 3016 
 3017                 sasaddr = targ->parent_sasaddr;
 3018 #endif /* OLD_MPS_PROBE */
 3019         }
 3020 
 3021         if (sasaddr == 0) {
 3022                 mps_dprint(sc, MPS_INFO,
 3023                            "%s: unable to find SAS address for handle %d\n",
 3024                            __func__, targ->handle);
 3025                 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 3026                 goto bailout;
 3027         }
 3028         mpssas_send_smpcmd(sassc, ccb, sasaddr);
 3029 
 3030         return;
 3031 
 3032 bailout:
 3033         xpt_done(ccb);
 3034 
 3035 }
 3036 
 3037 static void
 3038 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
 3039 {
 3040         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 3041         struct mps_softc *sc;
 3042         struct mps_command *tm;
 3043         struct mpssas_target *targ;
 3044 
 3045         MPS_FUNCTRACE(sassc->sc);
 3046         mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
 3047 
 3048         KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
 3049             ("Target %d out of bounds in XPT_RESET_DEV\n",
 3050              ccb->ccb_h.target_id));
 3051         sc = sassc->sc;
 3052         tm = mpssas_alloc_tm(sc);
 3053         if (tm == NULL) {
 3054                 mps_dprint(sc, MPS_ERROR,
 3055                     "command alloc failure in mpssas_action_resetdev\n");
 3056                 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
 3057                 xpt_done(ccb);
 3058                 return;
 3059         }
 3060 
 3061         targ = &sassc->targets[ccb->ccb_h.target_id];
 3062         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 3063         req->DevHandle = htole16(targ->handle);
 3064         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
 3065 
 3066         /* SAS Hard Link Reset / SATA Link Reset */
 3067         req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
 3068 
 3069         tm->cm_data = NULL;
 3070         tm->cm_complete = mpssas_resetdev_complete;
 3071         tm->cm_complete_data = ccb;
 3072         tm->cm_targ = targ;
 3073 
 3074         mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
 3075         mps_map_command(sc, tm);
 3076 }
 3077 
 3078 static void
 3079 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
 3080 {
 3081         MPI2_SCSI_TASK_MANAGE_REPLY *resp;
 3082         union ccb *ccb;
 3083 
 3084         MPS_FUNCTRACE(sc);
 3085         mtx_assert(&sc->mps_mtx, MA_OWNED);
 3086 
 3087         resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 3088         ccb = tm->cm_complete_data;
 3089 
 3090         /*
 3091          * Currently there should be no way we can hit this case.  It only
 3092          * happens when we have a failure to allocate chain frames, and
 3093          * task management commands don't have S/G lists.
 3094          */
 3095         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 3096                 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 3097 
 3098                 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 3099 
 3100                 mps_dprint(sc, MPS_ERROR,
 3101                            "%s: cm_flags = %#x for reset of handle %#04x! "
 3102                            "This should not happen!\n", __func__, tm->cm_flags,
 3103                            req->DevHandle);
 3104                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 3105                 goto bailout;
 3106         }
 3107 
 3108         mps_dprint(sc, MPS_XINFO,
 3109             "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
 3110             le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
 3111 
 3112         if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
 3113                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 3114                 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
 3115                     CAM_LUN_WILDCARD);
 3116         }
 3117         else
 3118                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 3119 
 3120 bailout:
 3121 
 3122         mpssas_free_tm(sc, tm);
 3123         xpt_done(ccb);
 3124 }
 3125 
 3126 static void
 3127 mpssas_poll(struct cam_sim *sim)
 3128 {
 3129         struct mpssas_softc *sassc;
 3130 
 3131         sassc = cam_sim_softc(sim);
 3132 
 3133         if (sassc->sc->mps_debug & MPS_TRACE) {
 3134                 /* frequent debug messages during a panic just slow
 3135                  * everything down too much.
 3136                  */
 3137                 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
 3138                 sassc->sc->mps_debug &= ~MPS_TRACE;
 3139         }
 3140 
 3141         mps_intr_locked(sassc->sc);
 3142 }
 3143 
 3144 static void
 3145 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
 3146              void *arg)
 3147 {
 3148         struct mps_softc *sc;
 3149 
 3150         sc = (struct mps_softc *)callback_arg;
 3151 
 3152         mps_lock(sc);
 3153         switch (code) {
 3154         case AC_ADVINFO_CHANGED: {
 3155                 struct mpssas_target *target;
 3156                 struct mpssas_softc *sassc;
 3157                 struct scsi_read_capacity_data_long rcap_buf;
 3158                 struct ccb_dev_advinfo cdai;
 3159                 struct mpssas_lun *lun;
 3160                 lun_id_t lunid;
 3161                 int found_lun;
 3162                 uintptr_t buftype;
 3163 
 3164                 buftype = (uintptr_t)arg;
 3165 
 3166                 found_lun = 0;
 3167                 sassc = sc->sassc;
 3168 
 3169                 /*
 3170                  * We're only interested in read capacity data changes.
 3171                  */
 3172                 if (buftype != CDAI_TYPE_RCAPLONG)
 3173                         break;
 3174 
 3175                 /*
 3176                  * We should have a handle for this, but check to make sure.
 3177                  */
 3178                 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
 3179                     ("Target %d out of bounds in mpssas_async\n",
 3180                     xpt_path_target_id(path)));
 3181                 target = &sassc->targets[xpt_path_target_id(path)];
 3182                 if (target->handle == 0)
 3183                         break;
 3184 
 3185                 lunid = xpt_path_lun_id(path);
 3186 
 3187                 SLIST_FOREACH(lun, &target->luns, lun_link) {
 3188                         if (lun->lun_id == lunid) {
 3189                                 found_lun = 1;
 3190                                 break;
 3191                         }
 3192                 }
 3193 
 3194                 if (found_lun == 0) {
 3195                         lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
 3196                                      M_NOWAIT | M_ZERO);
 3197                         if (lun == NULL) {
 3198                                 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
 3199                                            "LUN for EEDP support.\n");
 3200                                 break;
 3201                         }
 3202                         lun->lun_id = lunid;
 3203                         SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
 3204                 }
 3205 
 3206                 bzero(&rcap_buf, sizeof(rcap_buf));
 3207                 bzero(&cdai, sizeof(cdai));
 3208                 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 3209                 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 3210                 cdai.ccb_h.flags = CAM_DIR_IN;
 3211                 cdai.buftype = CDAI_TYPE_RCAPLONG;
 3212                 cdai.flags = CDAI_FLAG_NONE;
 3213                 cdai.bufsiz = sizeof(rcap_buf);
 3214                 cdai.buf = (uint8_t *)&rcap_buf;
 3215                 xpt_action((union ccb *)&cdai);
 3216                 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
 3217                         cam_release_devq(cdai.ccb_h.path,
 3218                                          0, 0, 0, FALSE);
 3219 
 3220                 if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
 3221                  && (rcap_buf.prot & SRC16_PROT_EN)) {
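                              /*
                               * READ CAPACITY(16) reports the protection type
                               * in the P_TYPE field.  Types 1 and 3 are
                               * treated as EEDP-formatted here; Type 2 is not,
                               * presumably because it requires the 32-byte
                               * protection CDB variants, which this driver
                               * does not issue.
                               */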
 3222                         switch (rcap_buf.prot & SRC16_P_TYPE) {
 3223                         case SRC16_PTYPE_1:
 3224                         case SRC16_PTYPE_3:
 3225                                 lun->eedp_formatted = TRUE;
 3226                                 lun->eedp_block_size =
 3227                                     scsi_4btoul(rcap_buf.length);
 3228                                 break;
 3229                         case SRC16_PTYPE_2:
 3230                         default:
 3231                                 lun->eedp_formatted = FALSE;
 3232                                 lun->eedp_block_size = 0;
 3233                                 break;
 3234                         }
 3235                 } else {
 3236                         lun->eedp_formatted = FALSE;
 3237                         lun->eedp_block_size = 0;
 3238                 }
 3239                 break;
 3240         }
 3241         default:
 3242                 break;
 3243         }
 3244         mps_unlock(sc);
 3245 }
 3246 
 3247 /*
 3248  * Freeze the devq and set the INRESET flag so that no I/O will be sent to
 3249  * the target until the reset has completed.  The CCB holds the path which
 3250  * is used to release the devq.  The devq is released and the CCB is freed
 3251  * when the TM completes.
 3252  * We only need to do this when we're entering reset, not each time we
 3253  * need to send an abort (which will happen if multiple commands time out
 3254  * while we're sending the abort). We do not release the queue for each
 3255  * command we complete (just at the end when we free the tm), so freezing
 3256  * it each time doesn't make sense.
 3257  */
 3258 void
 3259 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
 3260     struct mpssas_target *target, lun_id_t lun_id)
 3261 {
 3262         union ccb *ccb;
 3263         path_id_t path_id;
 3264 
 3265         ccb = xpt_alloc_ccb_nowait();
 3266         if (ccb) {
 3267                 path_id = cam_sim_path(sc->sassc->sim);
 3268                 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
 3269                     target->tid, lun_id) != CAM_REQ_CMP) {
 3270                         xpt_free_ccb(ccb);
 3271                 } else {
 3272                         tm->cm_ccb = ccb;
 3273                         tm->cm_targ = target;
 3274                         if ((target->flags & MPSSAS_TARGET_INRESET) == 0) {
 3275                                 mps_dprint(sc, MPS_XINFO | MPS_RECOVERY,
 3276                                     "%s: Freezing devq for target ID %d\n",
 3277                                     __func__, target->tid);
 3278                                 xpt_freeze_devq(ccb->ccb_h.path, 1);
 3279                                 target->flags |= MPSSAS_TARGET_INRESET;
 3280                         }
 3281                 }
 3282         }
 3283 }
 3284 
 3285 int
 3286 mpssas_startup(struct mps_softc *sc)
 3287 {
 3288 
 3289         /*
 3290          * Send the port enable message and set the wait_for_port_enable flag.
 3291          * This flag helps to keep the simq frozen until all discovery events
 3292          * are processed.
 3293          */
 3294         sc->wait_for_port_enable = 1;
 3295         mpssas_send_portenable(sc);
 3296         return (0);
 3297 }
 3298 
 3299 static int
 3300 mpssas_send_portenable(struct mps_softc *sc)
 3301 {
 3302         MPI2_PORT_ENABLE_REQUEST *request;
 3303         struct mps_command *cm;
 3304 
 3305         MPS_FUNCTRACE(sc);
 3306 
 3307         if ((cm = mps_alloc_command(sc)) == NULL)
 3308                 return (EBUSY);
 3309         request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
 3310         request->Function = MPI2_FUNCTION_PORT_ENABLE;
 3311         request->MsgFlags = 0;
 3312         request->VP_ID = 0;
 3313         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 3314         cm->cm_complete = mpssas_portenable_complete;
 3315         cm->cm_data = NULL;
 3316         cm->cm_sge = NULL;
 3317 
 3318         mps_map_command(sc, cm);
 3319         mps_dprint(sc, MPS_XINFO, 
 3320             "mps_send_portenable finished cm %p req %p complete %p\n",
 3321             cm, cm->cm_req, cm->cm_complete);
 3322         return (0);
 3323 }
 3324 
 3325 static void
 3326 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
 3327 {
 3328         MPI2_PORT_ENABLE_REPLY *reply;
 3329         struct mpssas_softc *sassc;
 3330 
 3331         MPS_FUNCTRACE(sc);
 3332         sassc = sc->sassc;
 3333 
 3334         /*
 3335          * Currently there should be no way we can hit this case.  It only
 3336          * happens when we have a failure to allocate chain frames, and
 3337          * port enable commands don't have S/G lists.
 3338          */
 3339         if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 3340                 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
 3341                            "This should not happen!\n", __func__, cm->cm_flags);
 3342         }
 3343 
 3344         reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
 3345         if (reply == NULL)
 3346                 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
 3347         else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
 3348             MPI2_IOCSTATUS_SUCCESS)
 3349                 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
 3350 
 3351         mps_free_command(sc, cm);
 3352 
 3353         /*
 3354          * Get WarpDrive info after discovery is complete but before the scan
 3355          * starts.  At this point, all devices are ready to be exposed to the
 3356          * OS.  If devices should be hidden instead, take them out of the
 3357          * 'targets' array before the scan.  A disk's devinfo will be non-zero
 3358          * while a volume's will be 0; use that to distinguish and remove disks.
 3359          */
 3360         mps_wd_config_pages(sc);
 3361 
 3362         /*
 3363          * Done waiting for port enable to complete.  Decrement the refcount.
 3364          * If refcount is 0, discovery is complete and a rescan of the bus can
 3365          * take place.  Since the simq was explicitly frozen before port
 3366          * enable, it must be explicitly released here to keep the
 3367          * freeze/release count in sync.
 3368          */
 3369         sc->wait_for_port_enable = 0;
 3370         sc->port_enable_complete = 1;
 3371         wakeup(&sc->port_enable_complete);
 3372         mpssas_startup_decrement(sassc);
 3373 }
 3374 
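      /*
       * Illustrative behavior (example values only): with an exclude list of
       * "3,7" in sc->exclude_ids, mpssas_check_id() returns 1 for IDs 3 and 7
       * and 0 for everything else; empty fields, as in "3,,7", are skipped.
       */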
 3375 int
 3376 mpssas_check_id(struct mpssas_softc *sassc, int id)
 3377 {
 3378         struct mps_softc *sc = sassc->sc;
 3379         char *ids;
 3380         char *name;
 3381 
 3382         ids = &sc->exclude_ids[0];
 3383         while ((name = strsep(&ids, ",")) != NULL) {
 3384                 if (name[0] == '\0')
 3385                         continue;
 3386                 if (strtol(name, NULL, 0) == (long)id)
 3387                         return (1);
 3388         }
 3389 
 3390         return (0);
 3391 }
 3392 
 3393 void
 3394 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
 3395 {
 3396         struct mpssas_softc *sassc;
 3397         struct mpssas_lun *lun, *lun_tmp;
 3398         struct mpssas_target *targ;
 3399         int i;
 3400 
 3401         sassc = sc->sassc;
 3402         /*
 3403          * The number of targets is based on IOC Facts, so free all of
 3404          * the allocated LUNs for each target and then the target buffer
 3405          * itself.
 3406          */
 3407         for (i = 0; i < maxtargets; i++) {
 3408                 targ = &sassc->targets[i];
 3409                 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
 3410                         free(lun, M_MPT2);
 3411                 }
 3412         }
 3413         free(sassc->targets, M_MPT2);
 3414 
 3415         sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
 3416             M_MPT2, M_WAITOK|M_ZERO);
 3417 }
