FreeBSD/Linux Kernel Cross Reference
sys/dev/raid/mps/mps_sas.c


    1 /*-
    2  * Copyright (c) 2009 Yahoo! Inc.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 /*-
   27  * Copyright (c) 2011 LSI Corp.
   28  * All rights reserved.
   29  *
   30  * Redistribution and use in source and binary forms, with or without
   31  * modification, are permitted provided that the following conditions
   32  * are met:
   33  * 1. Redistributions of source code must retain the above copyright
   34  *    notice, this list of conditions and the following disclaimer.
   35  * 2. Redistributions in binary form must reproduce the above copyright
   36  *    notice, this list of conditions and the following disclaimer in the
   37  *    documentation and/or other materials provided with the distribution.
   38  *
   39  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   40  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   42  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   43  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   44  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   45  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   46  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   47  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   48  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   49  * SUCH DAMAGE.
   50  *
   51  * LSI MPT-Fusion Host Adapter FreeBSD
   52  *
   53  * $FreeBSD: src/sys/dev/mps/mps_sas.c,v 1.16 2012/01/26 18:17:21 ken Exp $
   54  */
   55 
   56 /* Communications core for LSI MPT2 */
   57 
   58 /* TODO Move headers to mpsvar */
   59 #include <sys/types.h>
   60 #include <sys/param.h>
   61 #include <sys/systm.h>
   62 #include <sys/kernel.h>
   63 #include <sys/module.h>
   64 #include <sys/bus.h>
   65 #include <sys/conf.h>
   66 #include <sys/eventhandler.h>
   67 #include <sys/globaldata.h>
   68 #include <sys/bio.h>
   69 #include <sys/malloc.h>
   70 #include <sys/uio.h>
   71 #include <sys/sysctl.h>
   72 #include <sys/endian.h>
   73 #include <sys/queue.h>
   74 #include <sys/kthread.h>
   75 #include <sys/taskqueue.h>
   76 #include <sys/sbuf.h>
   77 
   78 #include <sys/rman.h>
   79 
   80 #include <machine/stdarg.h>
   81 
   82 #include <bus/cam/cam.h>
   83 #include <bus/cam/cam_ccb.h>
   84 #include <bus/cam/cam_xpt.h>
   85 #include <bus/cam/cam_debug.h>
   86 #include <bus/cam/cam_sim.h>
   87 #include <bus/cam/cam_xpt_sim.h>
   88 #include <bus/cam/cam_xpt_periph.h>
   89 #include <bus/cam/cam_periph.h>
   90 #include <bus/cam/scsi/scsi_all.h>
   91 #include <bus/cam/scsi/scsi_message.h>
   92 #if 0 /* XXX __FreeBSD_version >= 900026 */
   93 #include <bus/cam/scsi/smp_all.h>
   94 #endif
   95 
   96 #include <dev/raid/mps/mpi/mpi2_type.h>
   97 #include <dev/raid/mps/mpi/mpi2.h>
   98 #include <dev/raid/mps/mpi/mpi2_ioc.h>
   99 #include <dev/raid/mps/mpi/mpi2_sas.h>
  100 #include <dev/raid/mps/mpi/mpi2_cnfg.h>
  101 #include <dev/raid/mps/mpi/mpi2_init.h>
  102 #include <dev/raid/mps/mpi/mpi2_tool.h>
  103 #include <dev/raid/mps/mps_ioctl.h>
  104 #include <dev/raid/mps/mpsvar.h>
  105 #include <dev/raid/mps/mps_table.h>
  106 #include <dev/raid/mps/mps_sas.h>
  107 
  108 #define MPSSAS_DISCOVERY_TIMEOUT        20
  109 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS   10 /* 200 seconds */
  110 
  111 /*
  112  * static array to check SCSI OpCode for EEDP protection bits
  113  */
  114 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
  115 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
  116 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
  117 static uint8_t op_code_prot[256] = {
  118         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  119         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  120         0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
  121         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  122         0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  123         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  124         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  125         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  126         0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
  127         0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  128         0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
  129         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  130         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  131         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  132         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  133         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  134 };
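
       /*
        * Illustrative sketch (not part of this driver): the table above is
        * indexed by the first CDB byte, so the EEDP flags for a command can be
        * looked up directly by SCSI opcode (e.g. 0x28 READ(10) -> PRO_R,
        * 0x2a WRITE(10) -> PRO_W, 0x2f VERIFY(10) -> PRO_V).  The helper below
        * is hypothetical and only shows the access pattern; the SCSI I/O path
        * presumably performs an equivalent lookup when it builds the MPI2
        * SCSI IO request.
        */
       #if 0 /* example only */
       static __inline uint8_t
       eedp_flags_for_cdb(const uint8_t *cdb)
       {
               /* cdb[0] is the SCSI opcode */
               return (op_code_prot[cdb[0]]);
       }
       #endif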
  135 
  136 static void mpssas_log_command(struct mps_command *, const char *, ...)
  137                 __printflike(2, 3);
  138 #if 0 /* XXX unused */
  139 static void mpssas_discovery_timeout(void *data);
  140 #endif
  141 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
  142 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
  143 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
  144 static void mpssas_poll(struct cam_sim *sim);
  145 static void mpssas_scsiio_timeout(void *data);
  146 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
  147 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
  148     struct mps_command *cm, union ccb *ccb);
  149 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
  150 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
  151 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
  152 #if __FreeBSD_version >= 900026
  153 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
  154 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
  155                                uint64_t sasaddr);
  156 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
  157 #endif //FreeBSD_version >= 900026
  158 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
  159 static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
  160 static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
  161 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
  162 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
  163 static void mpssas_scanner_thread(void *arg);
  164 #if __FreeBSD_version >= 1000006
  165 static void mpssas_async(void *callback_arg, uint32_t code,
  166                          struct cam_path *path, void *arg);
  167 #else
  168 static void mpssas_check_eedp(struct mpssas_softc *sassc);
  169 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
  170 #endif
  171 static int mpssas_send_portenable(struct mps_softc *sc);
  172 static void mpssas_portenable_complete(struct mps_softc *sc,
  173     struct mps_command *cm);
  174 
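       /*
        * Walk the target array starting at 'start' and return the first target
        * whose device handle matches, or NULL if no such target exists.
        */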
  175 struct mpssas_target *
  176 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
  177 {
  178         struct mpssas_target *target;
  179         int i;
  180 
  181         for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
  182                 target = &sassc->targets[i];
  183                 if (target->handle == handle)
  184                         return (target);
  185         }
  186 
  187         return (NULL);
  188 }
  189 
  190 /* we need to freeze the simq during attach and diag reset, to avoid failing
  191  * commands before device handles have been found by discovery.  Since
  192  * discovery involves reading config pages and possibly sending commands,
  193  * discovery actions may continue even after we receive the end of discovery
  194  * event, so refcount discovery actions instead of assuming we can unfreeze
  195  * the simq when we get the event.
  196  */
  197 void
  198 mpssas_startup_increment(struct mpssas_softc *sassc)
  199 {
  200         if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
  201                 if (sassc->startup_refcount++ == 0) {
  202                         /* just starting, freeze the simq */
  203                         mps_dprint(sassc->sc, MPS_INFO,
  204                             "%s freezing simq\n", __func__);
  205                         xpt_freeze_simq(sassc->sim, 1);
  206                 }
  207                 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
  208                     sassc->startup_refcount);
  209         }
  210 }
  211 
  212 void
  213 mpssas_startup_decrement(struct mpssas_softc *sassc)
  214 {
  215         if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
  216                 if (--sassc->startup_refcount == 0) {
  217                         /* finished all discovery-related actions, release
  218                          * the simq and rescan for the latest topology.
  219                          */
  220                         mps_dprint(sassc->sc, MPS_INFO,
  221                             "%s releasing simq\n", __func__);
  222                         sassc->flags &= ~MPSSAS_IN_STARTUP;
  223                         xpt_release_simq(sassc->sim, 1);
  224                         mpssas_rescan_target(sassc->sc, NULL);
  225                 }
  226                 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
  227                     sassc->startup_refcount);
  228         }
  229 }
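
       /*
        * Illustrative sketch (not part of this driver): discovery-related work
        * is expected to bracket itself with the pair of functions above, so
        * the simq stays frozen for as long as any such work is outstanding.
        * The handler outline below is hypothetical and only shows the intended
        * pattern.
        */
       #if 0 /* example only */
       static void
       example_discovery_step(struct mpssas_softc *sassc)
       {
               mpssas_startup_increment(sassc);  /* freezes simq on 0 -> 1 */
               /* ... read config pages, queue follow-up requests ... */
               mpssas_startup_decrement(sassc);  /* releases simq on last decrement */
       }
       #endif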
  230 
  231 /* LSI's firmware requires us to stop sending commands when we're doing task
  232  * management, so refcount the TMs and keep the simq frozen when any are in
  233  * use.
  234  */
  235 struct mps_command *
  236 mpssas_alloc_tm(struct mps_softc *sc)
  237 {
  238         struct mps_command *tm;
  239 
  240         tm = mps_alloc_high_priority_command(sc);
  241         if (tm != NULL) {
  242                 if (sc->sassc->tm_count++ == 0) {
  243                         mps_printf(sc, "%s freezing simq\n", __func__);
  244                         xpt_freeze_simq(sc->sassc->sim, 1);
  245                 }
  246                 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
  247                     sc->sassc->tm_count);
  248         }
  249         return tm;
  250 }
  251 
  252 void
  253 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
  254 {
  255         if (tm == NULL)
  256                 return;
  257 
  258         /* if there are no TMs in use, we can release the simq.  We use our
  259          * own refcount so that it's easier for a diag reset to cleanup and
  260          * release the simq.
  261          */
  262         if (--sc->sassc->tm_count == 0) {
  263                 mps_printf(sc, "%s releasing simq\n", __func__);
  264                 xpt_release_simq(sc->sassc->sim, 1);
  265         }
  266         mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
  267             sc->sassc->tm_count);
  268 
  269         mps_free_high_priority_command(sc, tm);
  270 }
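
       /*
        * Illustrative sketch (not part of this driver): task management
        * commands follow an alloc/fill/map/complete/free pattern;
        * mpssas_prepare_remove() below is a real instance.  The outline here
        * is hypothetical, and the completion handler named in it does not
        * exist.
        */
       #if 0 /* example only */
       static void
       example_send_tm(struct mps_softc *sc, struct mpssas_target *targ)
       {
               struct mps_command *tm;

               tm = mpssas_alloc_tm(sc);  /* keeps simq frozen while any TM is active */
               if (tm == NULL)
                       return;
               /* ... fill in the MPI2_SCSI_TASK_MANAGE_REQUEST in tm->cm_req ... */
               tm->cm_targ = targ;
               tm->cm_complete = example_tm_complete;  /* must end with mpssas_free_tm() */
               mps_map_command(sc, tm);
       }
       #endif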
  271 
  272 
  273 void
  274 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
  275 {
  276         struct mpssas_softc *sassc = sc->sassc;
  277         path_id_t pathid;
  278         target_id_t targetid;
  279         union ccb *ccb;
  280 
  281         pathid = cam_sim_path(sassc->sim);
  282         if (targ == NULL)
  283                 targetid = CAM_TARGET_WILDCARD;
  284         else
  285                 targetid = targ - sassc->targets;
  286 
  287         /*
  288          * Allocate a CCB and schedule a rescan.
  289          */
  290         ccb = xpt_alloc_ccb();
  291 
  292         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
  293                             targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
  294                 mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
  295                 xpt_free_ccb(ccb);
  296                 return;
  297         }
  298 
  299         /* XXX Hardwired to scan the bus for now */
  300         ccb->ccb_h.func_code = XPT_SCAN_BUS;
  301         mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
  302         mpssas_rescan(sassc, ccb);
  303 }
  304 
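       /*
        * printf-style logging helper: prefixes the message with the command's
        * CAM path (or the sim name if there is no CCB) and its SMID.
        */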
  305 static void
  306 mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
  307 {
  308         struct sbuf sb;
  309         __va_list ap;
  310         char str[192];
  311         char path_str[64];
  312 
  313         if (cm == NULL)
  314                 return;
  315 
  316         sbuf_new(&sb, str, sizeof(str), 0);
  317 
  318         __va_start(ap, fmt);
  319 
  320         if (cm->cm_ccb != NULL) {
  321                 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
  322                                 sizeof(path_str));
  323                 sbuf_cat(&sb, path_str);
  324                 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
  325                         scsi_command_string(&cm->cm_ccb->csio, &sb);
  326                         sbuf_printf(&sb, "length %d ",
  327                                     cm->cm_ccb->csio.dxfer_len);
  328                 }
  329         }
  330         else {
  331                 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
  332                     cam_sim_name(cm->cm_sc->sassc->sim),
  333                     cam_sim_unit(cm->cm_sc->sassc->sim),
  334                     cam_sim_bus(cm->cm_sc->sassc->sim),
  335                     cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
  336                     cm->cm_lun);
  337         }
  338 
  339         sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
  340         sbuf_vprintf(&sb, fmt, ap);
  341         sbuf_finish(&sb);
  342         kprintf("%s", sbuf_data(&sb));
  343 
  344         __va_end(ap);
  345 }
  346 
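       /*
        * Completion handler for the target-reset TM issued by
        * mpssas_prepare_volume_remove().  On success, clear the target's state
        * so the slot can be reused if the volume comes back.
        */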
  347 static void
  348 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
  349 {
  350         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
  351         struct mpssas_target *targ;
  352         uint16_t handle;
  353 
  354         mps_dprint(sc, MPS_INFO, "%s\n", __func__);
  355 
  356         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
  357         handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
  358         targ = tm->cm_targ;
  359 
  360         if (reply == NULL) {
  361                 /* XXX retry the remove after the diag reset completes? */
   362                 mps_printf(sc, "%s NULL reply resetting device 0x%04x\n",
  363                            __func__, handle);
  364                 mpssas_free_tm(sc, tm);
  365                 return;
  366         }
  367 
  368         if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
  369                 mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
  370                            reply->IOCStatus, handle);
  371                 mpssas_free_tm(sc, tm);
  372                 return;
  373         }
  374 
  375         mps_printf(sc, "Reset aborted %u commands\n", reply->TerminationCount);
  376         mps_free_reply(sc, tm->cm_reply_data);
  377         tm->cm_reply = NULL;    /* Ensures the reply won't get re-freed */
  378 
  379         mps_printf(sc, "clearing target %u handle 0x%04x\n", targ->tid, handle);
  380 
  381         /*
  382          * Don't clear target if remove fails because things will get confusing.
  383          * Leave the devname and sasaddr intact so that we know to avoid reusing
  384          * this target id if possible, and so we can assign the same target id
  385          * to this device if it comes back in the future.
  386          */
  387         if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
  388                 targ = tm->cm_targ;
  389                 targ->handle = 0x0;
  390                 targ->encl_handle = 0x0;
  391                 targ->encl_slot = 0x0;
  392                 targ->exp_dev_handle = 0x0;
  393                 targ->phy_num = 0x0;
  394                 targ->linkrate = 0x0;
  395                 targ->devinfo = 0x0;
  396                 targ->flags = 0x0;
  397         }
  398 
  399         mpssas_free_tm(sc, tm);
  400 }
  401 
  402 /*
   403  * There is no need to issue MPI2_SAS_OP_REMOVE_DEVICE for volume removal.
   404  * Other than that, deleting a volume is handled the same as removing a bare drive.
  405  */
  406 void
  407 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
  408 {
  409         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
  410         struct mps_softc *sc;
  411         struct mps_command *cm;
  412         struct mpssas_target *targ = NULL;
  413 
  414         mps_dprint(sassc->sc, MPS_INFO, "%s\n", __func__);
  415         sc = sassc->sc;
  416 
  417 #ifdef WD_SUPPORT
  418         /*
  419          * If this is a WD controller, determine if the disk should be exposed
  420          * to the OS or not.  If disk should be exposed, return from this
  421          * function without doing anything.
  422          */
  423         if (sc->WD_available && (sc->WD_hide_expose ==
  424             MPS_WD_EXPOSE_ALWAYS)) {
  425                 return;
  426         }
  427 #endif
  428 
  429         targ = mpssas_find_target_by_handle(sassc, 0, handle);
  430         if (targ == NULL) {
  431                 /* FIXME: what is the action? */
  432                 /* We don't know about this device? */
  433                 kprintf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
  434                 return;
  435         }
  436 
  437         targ->flags |= MPSSAS_TARGET_INREMOVAL;
  438 
  439         cm = mpssas_alloc_tm(sc);
  440         if (cm == NULL) {
  441                 mps_printf(sc, "%s: command alloc failure\n", __func__);
  442                 return;
  443         }
  444 
  445         mpssas_rescan_target(sc, targ);
  446 
  447         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
  448         req->DevHandle = targ->handle;
  449         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
  450         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  451 
  452         /* SAS Hard Link Reset / SATA Link Reset */
  453         req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
  454 
  455         cm->cm_targ = targ;
  456         cm->cm_data = NULL;
  457         cm->cm_desc.HighPriority.RequestFlags =
  458                 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
  459         cm->cm_complete = mpssas_remove_volume;
  460         cm->cm_complete_data = (void *)(uintptr_t)handle;
  461         mps_map_command(sc, cm);
  462 }
  463 
  464 /*
  465  * The MPT2 firmware performs debounce on the link to avoid transient link
  466  * errors and false removals.  When it does decide that link has been lost
   467  * and a device needs to go away, it expects that the host will perform a
  468  * target reset and then an op remove.  The reset has the side-effect of
  469  * aborting any outstanding requests for the device, which is required for
  470  * the op-remove to succeed.  It's not clear if the host should check for
  471  * the device coming back alive after the reset.
  472  */
  473 void
  474 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
  475 {
  476         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
  477         struct mps_softc *sc;
  478         struct mps_command *cm;
  479         struct mpssas_target *targ = NULL;
  480 
  481         mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
  482 
  483         /*
  484          * If this is a WD controller, determine if the disk should be exposed
  485          * to the OS or not.  If disk should be exposed, return from this
  486          * function without doing anything.
  487          */
  488         sc = sassc->sc;
  489         if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
  490             MPS_WD_EXPOSE_ALWAYS)) {
  491                 return;
  492         }
  493 
  494         targ = mpssas_find_target_by_handle(sassc, 0, handle);
  495         if (targ == NULL) {
  496                 /* FIXME: what is the action? */
  497                 /* We don't know about this device? */
  498                 kprintf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
  499                 return;
  500         }
  501 
  502         targ->flags |= MPSSAS_TARGET_INREMOVAL;
  503 
  504         cm = mpssas_alloc_tm(sc);
  505         if (cm == NULL) {
  506                 mps_printf(sc, "%s: command alloc failure\n", __func__);
  507                 return;
  508         }
  509 
  510         mpssas_rescan_target(sc, targ);
  511 
  512         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
  513         memset(req, 0, sizeof(*req));
  514         req->DevHandle = targ->handle;
  515         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
  516         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  517 
  518         /* SAS Hard Link Reset / SATA Link Reset */
  519         req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
  520 
  521         cm->cm_targ = targ;
  522         cm->cm_data = NULL;
  523         cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
  524         cm->cm_complete = mpssas_remove_device;
  525         cm->cm_complete_data = (void *)(uintptr_t)handle;
  526         mps_map_command(sc, cm);
  527 }
  528 
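       /*
        * Completion handler for the target-reset TM issued by
        * mpssas_prepare_remove().  Reuses the same command to send a SAS IO
        * unit control OP_REMOVE_DEVICE request, then completes any commands
        * still queued to the target with CAM_DEV_NOT_THERE.
        */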
  529 static void
  530 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
  531 {
  532         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
  533         MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
  534         struct mpssas_target *targ;
  535         struct mps_command *next_cm;
  536         uint16_t handle;
  537 
  538         mps_dprint(sc, MPS_INFO, "%s\n", __func__);
  539 
  540         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
  541         handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
  542         targ = tm->cm_targ;
  543 
  544         /*
  545          * Currently there should be no way we can hit this case.  It only
  546          * happens when we have a failure to allocate chain frames, and
  547          * task management commands don't have S/G lists.
  548          */
  549         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
  550                 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
  551                            "This should not happen!\n", __func__, tm->cm_flags,
  552                            handle);
  553                 mpssas_free_tm(sc, tm);
  554                 return;
  555         }
  556 
  557         if (reply == NULL) {
  558                 /* XXX retry the remove after the diag reset completes? */
   559                 mps_printf(sc, "%s NULL reply resetting device 0x%04x\n",
  560                     __func__, handle);
  561                 mpssas_free_tm(sc, tm);
  562                 return;
  563         }
  564 
  565         if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
  566                 mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
  567                    reply->IOCStatus, handle);
  568                 mpssas_free_tm(sc, tm);
  569                 return;
  570         }
  571 
  572         mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
  573             reply->TerminationCount);
  574         mps_free_reply(sc, tm->cm_reply_data);
  575         tm->cm_reply = NULL;    /* Ensures the reply won't get re-freed */
  576 
  577         /* Reuse the existing command */
  578         req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
  579         memset(req, 0, sizeof(*req));
  580         req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
  581         req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
  582         req->DevHandle = handle;
  583         tm->cm_data = NULL;
  584         tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
  585         tm->cm_complete = mpssas_remove_complete;
  586         tm->cm_complete_data = (void *)(uintptr_t)handle;
  587 
  588         mps_map_command(sc, tm);
  589 
  590         mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
  591                    targ->tid, handle);
  592         TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
  593                 union ccb *ccb;
  594 
  595                 mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
  596                 ccb = tm->cm_complete_data;
  597                 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
  598                 mpssas_scsiio_complete(sc, tm);
  599         }
  600 }
  601 
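       /*
        * Completion handler for the OP_REMOVE_DEVICE request.  On success,
        * clear the target's handles so the device can be rediscovered later.
        */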
  602 static void
  603 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
  604 {
  605         MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
  606         uint16_t handle;
  607         struct mpssas_target *targ;
  608 
  609         mps_dprint(sc, MPS_INFO, "%s\n", __func__);
  610 
  611         reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
  612         handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
  613 
  614         /*
  615          * Currently there should be no way we can hit this case.  It only
  616          * happens when we have a failure to allocate chain frames, and
  617          * task management commands don't have S/G lists.
  618          */
  619         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
  620                 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
  621                            "This should not happen!\n", __func__, tm->cm_flags,
  622                            handle);
  623                 mpssas_free_tm(sc, tm);
  624                 return;
  625         }
  626 
  627         if (reply == NULL) {
  628                 /* most likely a chip reset */
  629                 mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
  630                     __func__, handle);
  631                 mpssas_free_tm(sc, tm);
  632                 return;
  633         }
  634 
  635         mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
  636             handle, reply->IOCStatus);
  637 
  638         /*
  639          * Don't clear target if remove fails because things will get confusing.
  640          * Leave the devname and sasaddr intact so that we know to avoid reusing
  641          * this target id if possible, and so we can assign the same target id
  642          * to this device if it comes back in the future.
  643          */
  644         if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
  645                 targ = tm->cm_targ;
  646                 targ->handle = 0x0;
  647                 targ->encl_handle = 0x0;
  648                 targ->encl_slot = 0x0;
  649                 targ->exp_dev_handle = 0x0;
  650                 targ->phy_num = 0x0;
  651                 targ->linkrate = 0x0;
  652                 targ->devinfo = 0x0;
  653                 targ->flags = 0x0;
  654         }
  655 
  656         mpssas_free_tm(sc, tm);
  657 }
  658 
  659 static int
  660 mpssas_register_events(struct mps_softc *sc)
  661 {
  662         uint8_t events[16];
  663 
  664         bzero(events, 16);
  665         setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
  666         setbit(events, MPI2_EVENT_SAS_DISCOVERY);
  667         setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
  668         setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
  669         setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
  670         setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
  671         setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
  672         setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
  673         setbit(events, MPI2_EVENT_IR_VOLUME);
  674         setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
  675         setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
  676         setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
  677 
  678         mps_register_events(sc, events, mpssas_evt_handler, NULL,
  679             &sc->sassc->mpssas_eh);
  680 
  681         return (0);
  682 }
  683 
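       /*
        * Allocate the SAS softc and target array, set up the CAM SIM, the
        * event taskqueue and the rescan thread, register the bus, and freeze
        * the simq until initial discovery completes.
        */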
  684 int
  685 mps_attach_sas(struct mps_softc *sc)
  686 {
  687         struct mpssas_softc *sassc;
  688 #if __FreeBSD_version >= 1000006
  689         cam_status status;
  690 #endif
  691         int unit, error = 0;
  692 
  693         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  694 
  695         sassc = kmalloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
  696         sassc->targets = kmalloc(sizeof(struct mpssas_target) *
  697             sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
  698         sc->sassc = sassc;
  699         sassc->sc = sc;
  700 
  701         if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
  702                 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
  703                 error = ENOMEM;
  704                 goto out;
  705         }
  706 
  707         unit = device_get_unit(sc->mps_dev);
  708         sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
  709             unit, &sc->mps_lock, sc->num_reqs, sc->num_reqs, sassc->devq);
  710         cam_simq_release(sassc->devq);
  711         if (sassc->sim == NULL) {
  712                 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
  713                 error = EINVAL;
  714                 goto out;
  715         }
  716 
  717         TAILQ_INIT(&sassc->ev_queue);
  718 
  719         /* Initialize taskqueue for Event Handling */
  720         TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
  721         sassc->ev_tq = taskqueue_create("mps_taskq", M_INTWAIT | M_ZERO,
  722             taskqueue_thread_enqueue, &sassc->ev_tq);
  723 
  724         /* Run the task queue with lowest priority */
  725         taskqueue_start_threads(&sassc->ev_tq, 1, 255, -1, "%s taskq",
  726             device_get_nameunit(sc->mps_dev));
  727 
  728         TAILQ_INIT(&sassc->ccb_scanq);
  729         error = mps_kproc_create(mpssas_scanner_thread, sassc,
  730             &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
  731         if (error) {
  732                 mps_printf(sc, "Error %d starting rescan thread\n", error);
  733                 goto out;
  734         }
  735 
  736         mps_lock(sc);
  737         sassc->flags |= MPSSAS_SCANTHREAD;
  738 
  739         /*
  740          * XXX There should be a bus for every port on the adapter, but since
  741          * we're just going to fake the topology for now, we'll pretend that
  742          * everything is just a target on a single bus.
  743          */
  744         if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
  745                 mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
  746                     error);
  747                 mps_unlock(sc);
  748                 goto out;
  749         }
  750 
  751         /*
  752          * Assume that discovery events will start right away.  Freezing
  753          * the simq will prevent the CAM boottime scanner from running
  754          * before discovery is complete.
  755          */
  756         sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
  757         xpt_freeze_simq(sassc->sim, 1);
  758         sc->sassc->startup_refcount = 0;
  759 
  760         callout_init_mp(&sassc->discovery_callout);
  761         sassc->discovery_timeouts = 0;
  762 
  763         sassc->tm_count = 0;
  764 
  765 #if __FreeBSD_version >= 1000006
  766         status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
  767         if (status != CAM_REQ_CMP) {
  768                 mps_printf(sc, "Error %#x registering async handler for "
  769                            "AC_ADVINFO_CHANGED events\n", status);
  770         }
  771 #endif
  772 
  773         mps_unlock(sc);
  774 
  775         mpssas_register_events(sc);
  776 out:
  777         if (error)
  778                 mps_detach_sas(sc);
  779         return (error);
  780 }
  781 
  782 int
  783 mps_detach_sas(struct mps_softc *sc)
  784 {
  785         struct mpssas_softc *sassc;
  786 
  787         mps_dprint(sc, MPS_INFO, "%s\n", __func__);
  788 
  789         if (sc->sassc == NULL)
  790                 return (0);
  791 
  792         sassc = sc->sassc;
  793         mps_deregister_events(sc, sassc->mpssas_eh);
  794 
  795         /*
  796          * Drain and free the event handling taskqueue with the lock
   797  * not held so that any parallel processing tasks drain properly
  798          * without deadlocking.
  799          */
  800         if (sassc->ev_tq != NULL)
  801                 taskqueue_free(sassc->ev_tq);
  802 
  803         /* Make sure CAM doesn't wedge if we had to bail out early. */
  804         mps_lock(sc);
  805 
  806         /* Deregister our async handler */
  807 #if __FreeBSD_version >= 1000006
  808         xpt_register_async(0, mpssas_async, sc, NULL);
  809 #endif
  810 
  811         if (sassc->flags & MPSSAS_IN_STARTUP)
  812                 xpt_release_simq(sassc->sim, 1);
  813 
  814         if (sassc->sim != NULL) {
  815                 xpt_bus_deregister(cam_sim_path(sassc->sim));
  816                 cam_sim_free(sassc->sim);
  817         }
  818 
  819         if (sassc->flags & MPSSAS_SCANTHREAD) {
  820                 sassc->flags |= MPSSAS_SHUTDOWN;
  821                 wakeup(&sassc->ccb_scanq);
  822 
  823                 if (sassc->flags & MPSSAS_SCANTHREAD) {
  824                         lksleep(&sassc->flags, &sc->mps_lock, 0,
  825                                "mps_shutdown", 30 * hz);
  826                 }
  827         }
  828         mps_unlock(sc);
  829 
  830         kfree(sassc->targets, M_MPT2);
  831         kfree(sassc, M_MPT2);
  832         sc->sassc = NULL;
  833 
  834         return (0);
  835 }
  836 
  837 void
  838 mpssas_discovery_end(struct mpssas_softc *sassc)
  839 {
  840         struct mps_softc *sc = sassc->sc;
  841 
  842         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  843 
  844         if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
  845                 callout_stop(&sassc->discovery_callout);
  846 
  847 }
  848 
  849 #if 0 /* XXX unused */
  850 static void
  851 mpssas_discovery_timeout(void *data)
  852 {
  853         struct mpssas_softc *sassc = data;
  854         struct mps_softc *sc;
  855 
  856         sc = sassc->sc;
  857         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  858 
  859         mps_lock(sc);
  860         mps_printf(sc,
  861             "Timeout waiting for discovery, interrupts may not be working!\n");
  862         sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
  863 
  864         /* Poll the hardware for events in case interrupts aren't working */
  865         mps_intr_locked(sc);
  866 
  867         mps_printf(sassc->sc,
  868             "Finished polling after discovery timeout at %d\n", ticks);
  869 
  870         if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
  871                 mpssas_discovery_end(sassc);
  872         } else {
  873                 if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
  874                         sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
  875                         callout_reset(&sassc->discovery_callout,
  876                             MPSSAS_DISCOVERY_TIMEOUT * hz,
  877                             mpssas_discovery_timeout, sassc);
  878                         sassc->discovery_timeouts++;
  879                 } else {
  880                         mps_dprint(sassc->sc, MPS_FAULT,
  881                             "Discovery timed out, continuing.\n");
  882                         sassc->flags &= ~MPSSAS_IN_DISCOVERY;
  883                         mpssas_discovery_end(sassc);
  884                 }
  885         }
  886 
  887         mps_unlock(sc);
  888 }
  889 #endif
  890 
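       /*
        * CAM action entry point for the SIM: dispatch the CCB by function code.
        */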
  891 static void
  892 mpssas_action(struct cam_sim *sim, union ccb *ccb)
  893 {
  894         struct mpssas_softc *sassc;
  895 
  896         sassc = cam_sim_softc(sim);
  897 
  898         mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
  899             ccb->ccb_h.func_code);
  900         KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
  901 
  902         switch (ccb->ccb_h.func_code) {
  903         case XPT_PATH_INQ:
  904         {
  905                 struct ccb_pathinq *cpi = &ccb->cpi;
  906 
  907                 cpi->version_num = 1;
  908                 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
  909                 cpi->target_sprt = 0;
  910                 cpi->hba_misc = PIM_NOBUSRESET;
  911                 cpi->hba_eng_cnt = 0;
  912                 cpi->max_target = sassc->sc->facts->MaxTargets - 1;
  913                 cpi->max_lun = 8;
  914                 cpi->initiator_id = sassc->sc->facts->MaxTargets - 1;
  915                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
  916                 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
  917                 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
  918                 cpi->unit_number = cam_sim_unit(sim);
  919                 cpi->bus_id = cam_sim_bus(sim);
  920                 cpi->base_transfer_speed = 150000;
  921                 cpi->transport = XPORT_SAS;
  922                 cpi->transport_version = 0;
  923                 cpi->protocol = PROTO_SCSI;
  924                 cpi->protocol_version = SCSI_REV_SPC;
  925 #if __FreeBSD_version >= 800001
  926                 /*
  927                  * XXX KDM where does this number come from?
  928                  */
  929                 cpi->maxio = 256 * 1024;
  930 #endif
  931                 cpi->ccb_h.status = CAM_REQ_CMP;
  932                 break;
  933         }
  934         case XPT_GET_TRAN_SETTINGS:
  935         {
  936                 struct ccb_trans_settings       *cts;
  937                 struct ccb_trans_settings_sas   *sas;
  938                 struct ccb_trans_settings_scsi  *scsi;
  939                 struct mpssas_target *targ;
  940 
  941                 cts = &ccb->cts;
  942                 sas = &cts->xport_specific.sas;
  943                 scsi = &cts->proto_specific.scsi;
  944 
  945                 targ = &sassc->targets[cts->ccb_h.target_id];
  946                 if (targ->handle == 0x0) {
  947                         cts->ccb_h.status = CAM_TID_INVALID;
  948                         break;
  949                 }
  950 
  951                 cts->protocol_version = SCSI_REV_SPC2;
  952                 cts->transport = XPORT_SAS;
  953                 cts->transport_version = 0;
  954 
  955                 sas->valid = CTS_SAS_VALID_SPEED;
  956                 switch (targ->linkrate) {
  957                 case 0x08:
  958                         sas->bitrate = 150000;
  959                         break;
  960                 case 0x09:
  961                         sas->bitrate = 300000;
  962                         break;
  963                 case 0x0a:
  964                         sas->bitrate = 600000;
  965                         break;
  966                 default:
  967                         sas->valid = 0;
  968                 }
  969 
  970                 cts->protocol = PROTO_SCSI;
  971                 scsi->valid = CTS_SCSI_VALID_TQ;
  972                 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
  973 
  974                 cts->ccb_h.status = CAM_REQ_CMP;
  975                 break;
  976         }
  977         case XPT_CALC_GEOMETRY:
  978                 cam_calc_geometry(&ccb->ccg, /*extended*/1);
  979                 ccb->ccb_h.status = CAM_REQ_CMP;
  980                 break;
  981         case XPT_RESET_DEV:
  982                 mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
  983                 mpssas_action_resetdev(sassc, ccb);
  984                 return;
  985         case XPT_RESET_BUS:
  986         case XPT_ABORT:
  987         case XPT_TERM_IO:
  988                 mps_printf(sassc->sc, "mpssas_action faking success for "
  989                            "abort or reset\n");
  990                 ccb->ccb_h.status = CAM_REQ_CMP;
  991                 break;
  992         case XPT_SCSI_IO:
  993                 mpssas_action_scsiio(sassc, ccb);
  994                 return;
  995 #if __FreeBSD_version >= 900026
  996         case XPT_SMP_IO:
  997                 mpssas_action_smpio(sassc, ccb);
  998                 return;
  999 #endif
 1000         default:
 1001                 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 1002                 break;
 1003         }
 1004         xpt_done(ccb);
 1005 
 1006 }
 1007 
 1008 static void
 1009 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
 1010     target_id_t target_id, lun_id_t lun_id)
 1011 {
 1012         path_id_t path_id = cam_sim_path(sc->sassc->sim);
 1013         struct cam_path *path;
 1014 
 1015         mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
 1016             ac_code, target_id, lun_id);
 1017 
 1018         if (xpt_create_path(&path, NULL,
 1019                 path_id, target_id, lun_id) != CAM_REQ_CMP) {
 1020                 mps_printf(sc, "unable to create path for reset "
 1021                            "notification\n");
 1022                 return;
 1023         }
 1024 
 1025         xpt_async(ac_code, path, NULL);
 1026         xpt_free_path(path);
 1027 }
 1028 
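       /*
        * Force-complete every active command with a NULL reply.  Called while
        * recovering from a diag reset.
        */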
 1029 static void
 1030 mpssas_complete_all_commands(struct mps_softc *sc)
 1031 {
 1032         struct mps_command *cm;
 1033         int i;
 1034         int completed;
 1035 
 1036         mps_printf(sc, "%s\n", __func__);
 1037         KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
 1038 
 1039         /* complete all commands with a NULL reply */
 1040         for (i = 1; i < sc->num_reqs; i++) {
 1041                 cm = &sc->commands[i];
 1042                 cm->cm_reply = NULL;
 1043                 completed = 0;
 1044 
 1045                 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
 1046                         cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
 1047 
 1048                 if (cm->cm_complete != NULL) {
 1049                         mpssas_log_command(cm,
 1050                             "completing cm %p state %x ccb %p for diag reset\n",
 1051                             cm, cm->cm_state, cm->cm_ccb);
 1052 
 1053                         cm->cm_complete(sc, cm);
 1054                         completed = 1;
 1055                 }
 1056 
 1057                 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
 1058                         mpssas_log_command(cm,
 1059                             "waking up cm %p state %x ccb %p for diag reset\n",
 1060                             cm, cm->cm_state, cm->cm_ccb);
 1061                         wakeup(cm);
 1062                         completed = 1;
 1063                 }
 1064 
 1065                 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
 1066                         /* this should never happen, but if it does, log */
 1067                         mpssas_log_command(cm,
 1068                             "cm %p state %x flags 0x%x ccb %p during diag "
 1069                             "reset\n", cm, cm->cm_state, cm->cm_flags,
 1070                             cm->cm_ccb);
 1071                 }
 1072         }
 1073 }
 1074 
 1075 void
 1076 mpssas_handle_reinit(struct mps_softc *sc)
 1077 {
 1078         int i;
 1079 
 1080         /* Go back into startup mode and freeze the simq, so that CAM
 1081          * doesn't send any commands until after we've rediscovered all
 1082          * targets and found the proper device handles for them.
 1083          *
 1084          * After the reset, portenable will trigger discovery, and after all
 1085          * discovery-related activities have finished, the simq will be
 1086          * released.
 1087          */
 1088         mps_printf(sc, "%s startup\n", __func__);
 1089         sc->sassc->flags |= MPSSAS_IN_STARTUP;
 1090         sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
 1091         xpt_freeze_simq(sc->sassc->sim, 1);
 1092 
 1093         /* notify CAM of a bus reset */
 1094         mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
 1095             CAM_LUN_WILDCARD);
 1096 
 1097         /* complete and cleanup after all outstanding commands */
 1098         mpssas_complete_all_commands(sc);
 1099 
 1100         mps_printf(sc, "%s startup %u tm %u after command completion\n",
 1101             __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
 1102 
 1103         /*
 1104          * The simq was explicitly frozen above, so set the refcount to 0.
 1105          * The simq will be explicitly released after port enable completes.
 1106          */
 1107         sc->sassc->startup_refcount = 0;
 1108 
 1109         /* zero all the target handles, since they may change after the
 1110          * reset, and we have to rediscover all the targets and use the new
 1111          * handles.
 1112          */
 1113         for (i = 0; i < sc->facts->MaxTargets; i++) {
 1114                 if (sc->sassc->targets[i].outstanding != 0)
 1115                         mps_printf(sc, "target %u outstanding %u\n",
 1116                             i, sc->sassc->targets[i].outstanding);
 1117                 sc->sassc->targets[i].handle = 0x0;
 1118                 sc->sassc->targets[i].exp_dev_handle = 0x0;
 1119                 sc->sassc->targets[i].outstanding = 0;
 1120                 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
 1121         }
 1122 }
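
       /*
        * A task management command itself timed out; the only recovery left is
        * to reinitialize the controller.
        */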
 1123 static void
 1124 mpssas_tm_timeout(void *data)
 1125 {
 1126         struct mps_command *tm = data;
 1127         struct mps_softc *sc = tm->cm_sc;
 1128 
 1129         mps_lock(sc);
 1130         mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
 1131         mps_reinit(sc);
 1132         mps_unlock(sc);
 1133 }
 1134 
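       /*
        * Completion handler for logical unit reset TMs.  If commands are still
        * outstanding on the LUN, escalate to a target reset.
        */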
 1135 static void
 1136 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
 1137 {
 1138         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
 1139         unsigned int cm_count = 0;
 1140         struct mps_command *cm;
 1141         struct mpssas_target *targ;
 1142 
 1143         callout_stop(&tm->cm_callout);
 1144 
 1145         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 1146         targ = tm->cm_targ;
 1147 
 1148         /*
 1149          * Currently there should be no way we can hit this case.  It only
 1150          * happens when we have a failure to allocate chain frames, and
 1151          * task management commands don't have S/G lists.
 1152          */
 1153         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 1154                 mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
 1155                            "This should not happen!\n", __func__, tm->cm_flags);
 1156                 mpssas_free_tm(sc, tm);
 1157                 return;
 1158         }
 1159 
 1160         if (reply == NULL) {
 1161                 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
 1162                 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
 1163                         /* this completion was due to a reset, just cleanup */
 1164                         targ->flags &= ~MPSSAS_TARGET_INRESET;
 1165                         targ->tm = NULL;
 1166                         mpssas_free_tm(sc, tm);
 1167                 }
 1168                 else {
 1169                         /* we should have gotten a reply. */
 1170                         mps_reinit(sc);
 1171                 }
 1172                 return;
 1173         }
 1174 
 1175         mpssas_log_command(tm,
 1176             "logical unit reset status 0x%x code 0x%x count %u\n",
 1177             reply->IOCStatus, reply->ResponseCode,
 1178             reply->TerminationCount);
 1179 
 1180         /* See if there are any outstanding commands for this LUN.
 1181          * This could be made more efficient by using a per-LU data
 1182          * structure of some sort.
 1183          */
 1184         TAILQ_FOREACH(cm, &targ->commands, cm_link) {
 1185                 if (cm->cm_lun == tm->cm_lun)
 1186                         cm_count++;
 1187         }
 1188 
 1189         if (cm_count == 0) {
 1190                 mpssas_log_command(tm,
 1191                     "logical unit %u finished recovery after reset\n",
 1192                     tm->cm_lun);
 1193 
 1194                 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
 1195                     tm->cm_lun);
 1196 
 1197                 /* we've finished recovery for this logical unit.  check and
 1198                  * see if some other logical unit has a timedout command
 1199                  * that needs to be processed.
 1200                  */
 1201                 cm = TAILQ_FIRST(&targ->timedout_commands);
 1202                 if (cm) {
 1203                         mpssas_send_abort(sc, tm, cm);
 1204                 }
 1205                 else {
 1206                         targ->tm = NULL;
 1207                         mpssas_free_tm(sc, tm);
 1208                 }
 1209         }
 1210         else {
 1211                 /* if we still have commands for this LUN, the reset
 1212                  * effectively failed, regardless of the status reported.
 1213                  * Escalate to a target reset.
 1214                  */
 1215                 mpssas_log_command(tm,
 1216                     "logical unit reset complete for tm %p, but still have %u command(s)\n",
 1217                     tm, cm_count);
 1218                 mpssas_send_reset(sc, tm,
 1219                     MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
 1220         }
 1221 }
 1222 
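       /*
        * Completion handler for target reset TMs.  If the target still has
        * outstanding commands, escalate to a controller reinit.
        */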
 1223 static void
 1224 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
 1225 {
 1226         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
 1227         struct mpssas_target *targ;
 1228 
 1229         callout_stop(&tm->cm_callout);
 1230 
 1231         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 1232         targ = tm->cm_targ;
 1233 
 1234         /*
 1235          * Currently there should be no way we can hit this case.  It only
 1236          * happens when we have a failure to allocate chain frames, and
 1237          * task management commands don't have S/G lists.
 1238          */
 1239         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 1240                 mps_printf(sc, "%s: cm_flags = %#x for target reset! "
 1241                            "This should not happen!\n", __func__, tm->cm_flags);
 1242                 mpssas_free_tm(sc, tm);
 1243                 return;
 1244         }
 1245 
 1246         if (reply == NULL) {
 1247                 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
 1248                 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
 1249                         /* this completion was due to a reset, just cleanup */
 1250                         targ->flags &= ~MPSSAS_TARGET_INRESET;
 1251                         targ->tm = NULL;
 1252                         mpssas_free_tm(sc, tm);
 1253                 }
 1254                 else {
 1255                         /* we should have gotten a reply. */
 1256                         mps_reinit(sc);
 1257                 }
 1258                 return;
 1259         }
 1260 
 1261         mpssas_log_command(tm,
 1262             "target reset status 0x%x code 0x%x count %u\n",
 1263             reply->IOCStatus, reply->ResponseCode,
 1264             reply->TerminationCount);
 1265 
 1266         targ->flags &= ~MPSSAS_TARGET_INRESET;
 1267 
 1268         if (targ->outstanding == 0) {
 1269                 /* we've finished recovery for this target and all
 1270                  * of its logical units.
 1271                  */
 1272                 mpssas_log_command(tm,
 1273                     "recovery finished after target reset\n");
 1274 
 1275                 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
 1276                     CAM_LUN_WILDCARD);
 1277 
 1278                 targ->tm = NULL;
 1279                 mpssas_free_tm(sc, tm);
 1280         }
 1281         else {
 1282                 /* after a target reset, if this target still has
 1283                  * outstanding commands, the reset effectively failed,
 1284                  * regardless of the status reported.  escalate.
 1285                  */
 1286                 mpssas_log_command(tm,
 1287                     "target reset complete for tm %p, but still have %u command(s)\n",
 1288                     tm, targ->outstanding);
 1289                 mps_reinit(sc);
 1290         }
 1291 }
 1292 
 1293 #define MPS_RESET_TIMEOUT 30
 1294 
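       /*
        * Issue a logical unit reset or target reset TM, depending on 'type',
        * arm the TM timeout, and install the matching completion handler.
        */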
 1295 static int
 1296 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
 1297 {
 1298         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1299         struct mpssas_target *target;
 1300         int err;
 1301 
 1302         target = tm->cm_targ;
 1303         if (target->handle == 0) {
 1304                 mps_printf(sc, "%s null devhandle for target_id %d\n",
 1305                     __func__, target->tid);
 1306                 return -1;
 1307         }
 1308 
 1309         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1310         req->DevHandle = target->handle;
 1311         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 1312         req->TaskType = type;
 1313 
 1314         if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
 1315                 /* XXX Need to handle invalid LUNs */
 1316                 MPS_SET_LUN(req->LUN, tm->cm_lun);
 1317                 tm->cm_targ->logical_unit_resets++;
 1318                 mpssas_log_command(tm, "sending logical unit reset\n");
 1319                 tm->cm_complete = mpssas_logical_unit_reset_complete;
 1320         }
 1321         else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
 1322                 /* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
 1323                 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
 1324                 tm->cm_targ->target_resets++;
 1325                 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
 1326                 mpssas_log_command(tm, "sending target reset\n");
 1327                 tm->cm_complete = mpssas_target_reset_complete;
 1328         }
 1329         else {
 1330                 mps_printf(sc, "unexpected reset type 0x%x\n", type);
 1331                 return -1;
 1332         }
 1333 
 1334         tm->cm_data = NULL;
 1335         tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
 1336         tm->cm_complete_data = (void *)tm;
 1337 
 1338         callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
 1339             mpssas_tm_timeout, tm);
 1340 
 1341         err = mps_map_command(sc, tm);
 1342         if (err)
 1343                 mpssas_log_command(tm,
 1344                     "error %d sending reset type %u\n",
 1345                     err, type);
 1346 
 1347         return err;
 1348 }
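      /*
       * A minimal usage sketch (illustrative only, not part of the driver):
       * the only two task types accepted above are LU reset and target
       * reset, e.g.
       *
       *     (void)mpssas_send_reset(sc, tm,
       *         MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
       *
       * which is exactly what mpssas_abort_complete() below does when an
       * abort fails.  A target reset that still leaves commands outstanding
       * escalates further, to mps_reinit(), in mpssas_target_reset_complete()
       * above.
       */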
 1349 
 1350 
 1351 static void
 1352 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
 1353 {
 1354         struct mps_command *cm;
 1355         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
 1356         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1357         struct mpssas_target *targ;
 1358 
 1359         callout_stop(&tm->cm_callout);
 1360 
 1361         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1362         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 1363         targ = tm->cm_targ;
 1364 
 1365         /*
 1366          * Currently there should be no way we can hit this case.  It only
 1367          * happens when we have a failure to allocate chain frames, and
 1368          * task management commands don't have S/G lists.
 1369          */
 1370         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 1371                 mpssas_log_command(tm,
 1372                     "cm_flags = %#x for abort %p TaskMID %u!\n",
 1373                     tm->cm_flags, tm, req->TaskMID);
 1374                 mpssas_free_tm(sc, tm);
 1375                 return;
 1376         }
 1377 
 1378         if (reply == NULL) {
 1379                 mpssas_log_command(tm,
 1380                     "NULL abort reply for tm %p TaskMID %u\n",
 1381                     tm, req->TaskMID);
 1382                 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
 1383                         /* this completion was due to a reset, just cleanup */
 1384                         targ->tm = NULL;
 1385                         mpssas_free_tm(sc, tm);
 1386                 }
 1387                 else {
 1388                         /* we should have gotten a reply. */
 1389                         mps_reinit(sc);
 1390                 }
 1391                 return;
 1392         }
 1393 
 1394         mpssas_log_command(tm,
 1395             "abort TaskMID %u status 0x%x code 0x%x count %u\n",
 1396             req->TaskMID,
 1397             reply->IOCStatus, reply->ResponseCode,
 1398             reply->TerminationCount);
 1399 
 1400         cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
 1401         if (cm == NULL) {
 1402                 /* if there are no more timedout commands, we're done with
 1403                  * error recovery for this target.
 1404                  */
 1405                 mpssas_log_command(tm,
 1406                     "finished recovery after aborting TaskMID %u\n",
 1407                     req->TaskMID);
 1408 
 1409                 targ->tm = NULL;
 1410                 mpssas_free_tm(sc, tm);
 1411         }
 1412         else if (req->TaskMID != cm->cm_desc.Default.SMID) {
 1413                 /* abort success, but we have more timedout commands to abort */
 1414                 mpssas_log_command(tm,
 1415                     "continuing recovery after aborting TaskMID %u\n",
 1416                     req->TaskMID);
 1417 
 1418                 mpssas_send_abort(sc, tm, cm);
 1419         }
 1420         else {
 1421                 /* we didn't get a command completion, so the abort
 1422                  * failed as far as we're concerned.  escalate.
 1423                  */
 1424                 mpssas_log_command(tm,
 1425                     "abort failed for TaskMID %u tm %p\n",
 1426                     req->TaskMID, tm);
 1427 
 1428                 mpssas_send_reset(sc, tm,
 1429                     MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
 1430         }
 1431 }
 1432 
 1433 #define MPS_ABORT_TIMEOUT 5
 1434 
 1435 static int
 1436 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
 1437 {
 1438         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1439         struct mpssas_target *targ;
 1440         int err;
 1441 
 1442         targ = cm->cm_targ;
 1443         if (targ->handle == 0) {
 1444                 mps_printf(sc, "%s null devhandle for target_id %d\n",
 1445                     __func__, cm->cm_ccb->ccb_h.target_id);
 1446                 return -1;
 1447         }
 1448 
 1449         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1450         req->DevHandle = targ->handle;
 1451         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 1452         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
 1453 
 1454         /* XXX Need to handle invalid LUNs */
 1455         MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
 1456 
 1457         req->TaskMID = cm->cm_desc.Default.SMID;
 1458 
 1459         tm->cm_data = NULL;
 1460         tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
 1461         tm->cm_complete = mpssas_abort_complete;
 1462         tm->cm_complete_data = (void *)tm;
 1463         tm->cm_targ = cm->cm_targ;
 1464         tm->cm_lun = cm->cm_lun;
 1465 
 1466         callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
 1467             mpssas_tm_timeout, tm);
 1468 
 1469         targ->aborts++;
 1470 
 1471         err = mps_map_command(sc, tm);
 1472         if (err)
 1473                 mpssas_log_command(tm,
 1474                     "error %d sending abort for cm %p SMID %u\n",
 1475                     err, cm, req->TaskMID);
 1476         return err;
 1477 }
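      /*
       * Illustrative note (not part of the driver source): req->TaskMID is
       * set above to the SMID of the command being aborted, and
       * mpssas_abort_complete() above compares that same value against the
       * SMID at the head of targ->timedout_commands to decide whether the
       * abort actually retired that command.  The callout arms for
       * MPS_ABORT_TIMEOUT * hz ticks, i.e. 5 seconds regardless of the
       * kernel tick rate (5 * 1000 = 5000 ticks when hz is 1000).
       */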
 1478 
 1479 
 1480 static void
 1481 mpssas_scsiio_timeout(void *data)
 1482 {
 1483         struct mps_softc *sc;
 1484         struct mps_command *cm;
 1485         struct mpssas_target *targ;
 1486 
 1487         cm = (struct mps_command *)data;
 1488         sc = cm->cm_sc;
 1489 
 1490         KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
 1491 
 1492         mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);
 1493 
 1494         /*
 1495          * Run the interrupt handler to make sure it's not pending.  This
 1496          * isn't perfect because the command could have already completed
 1497          * and been re-used, though this is unlikely.
 1498          */
 1499         mps_intr_locked(sc);
 1500         if (cm->cm_state == MPS_CM_STATE_FREE) {
 1501                 mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
 1502                 return;
 1503         }
 1504 
 1505         if (cm->cm_ccb == NULL) {
 1506                 mps_printf(sc, "command timeout with NULL ccb\n");
 1507                 return;
 1508         }
 1509 
 1510         mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
 1511             cm, cm->cm_ccb);
 1512 
 1513         targ = cm->cm_targ;
 1514         targ->timeouts++;
 1515 
 1516         /* XXX first, check the firmware state, to see if it's still
 1517          * operational.  if not, do a diag reset.
 1518          */
 1519 
 1520         cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 1521         cm->cm_state = MPS_CM_STATE_TIMEDOUT;
 1522         TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
 1523 
 1524         if (targ->tm != NULL) {
 1525                 /* target already in recovery, just queue up another
 1526                  * timedout command to be processed later.
 1527                  */
 1528                 mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
 1529                     cm, targ->tm);
 1530         }
 1531         else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
 1532                 mps_printf(sc, "timedout cm %p allocated tm %p\n",
 1533                     cm, targ->tm);
 1534 
 1535                 /* start recovery by aborting the first timedout command */
 1536                 mpssas_send_abort(sc, targ->tm, cm);
 1537         }
 1538         else {
 1539                 /* XXX queue this target up for recovery once a TM becomes
 1540                  * available.  The firmware only has a limited number of
 1541                  * HighPriority credits for the high priority requests used
 1542                  * for task management, and we ran out.
 1543                  *
 1544                  * Isilon: don't worry about this for now, since we have
 1545                  * more credits than disks in an enclosure, and limit
 1546                  * ourselves to one TM per target for recovery.
 1547                  */
 1548                 mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
 1549                     cm);
 1550         }
 1551 
 1552 }
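      /*
       * Illustrative note (not part of the driver source): a timed-out
       * command is only queued here; recovery itself is serialized through
       * the single tm per target.  mpssas_abort_complete() above drains
       * targ->timedout_commands one entry at a time, sending a fresh abort
       * for each, so a burst of timeouts on one target becomes a chain of
       * aborts rather than parallel task-management requests.
       */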
 1553 
 1554 static void
 1555 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
 1556 {
 1557         MPI2_SCSI_IO_REQUEST *req;
 1558         struct ccb_scsiio *csio;
 1559         struct mps_softc *sc;
 1560         struct mpssas_target *targ;
 1561         struct mpssas_lun *lun;
 1562         struct mps_command *cm;
 1563         uint8_t i, lba_byte, *ref_tag_addr;
 1564         uint16_t eedp_flags;
 1565 
 1566         sc = sassc->sc;
 1567         mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
 1568         KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
 1569 
 1570         csio = &ccb->csio;
 1571         targ = &sassc->targets[csio->ccb_h.target_id];
 1572         if (targ->handle == 0x0) {
 1573                 mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
 1574                     __func__, csio->ccb_h.target_id);
 1575                 csio->ccb_h.status = CAM_TID_INVALID;
 1576                 xpt_done(ccb);
 1577                 return;
 1578         }
 1579         if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
 1580                 mps_dprint(sc, MPS_TRACE, "%s Raid component no SCSI IO supported %u\n",
 1581                            __func__, csio->ccb_h.target_id);
 1582                 csio->ccb_h.status = CAM_TID_INVALID;
 1583                 xpt_done(ccb);
 1584                 return;
 1585         }
 1586 
 1587         /*
 1588          * If devinfo is 0 this will be a volume.  In that case don't tell CAM
 1589          * that the volume has timed out.  We want volumes to be enumerated
 1590          * until they are deleted/removed, not just failed.
 1591          */
 1592         if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
 1593                 if (targ->devinfo == 0)
 1594                         csio->ccb_h.status = CAM_REQ_CMP;
 1595                 else
 1596                         csio->ccb_h.status = CAM_SEL_TIMEOUT;
 1597                 xpt_done(ccb);
 1598                 return;
 1599         }
 1600 
 1601         if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
 1602                 mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
 1603                 csio->ccb_h.status = CAM_TID_INVALID;
 1604                 xpt_done(ccb);
 1605                 return;
 1606         }
 1607 
 1608         cm = mps_alloc_command(sc);
 1609         if (cm == NULL) {
 1610                 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
 1611                         xpt_freeze_simq(sassc->sim, 1);
 1612                         sassc->flags |= MPSSAS_QUEUE_FROZEN;
 1613                 }
 1614                 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 1615                 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
 1616                 xpt_done(ccb);
 1617                 return;
 1618         }
 1619 
 1620         req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
 1621         bzero(req, sizeof(*req));
 1622         req->DevHandle = targ->handle;
 1623         req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
 1624         req->MsgFlags = 0;
 1625         req->SenseBufferLowAddress = cm->cm_sense_busaddr;
 1626         req->SenseBufferLength = MPS_SENSE_LEN;
 1627         req->SGLFlags = 0;
 1628         req->ChainOffset = 0;
 1629         req->SGLOffset0 = 24;   /* 32bit word offset to the SGL */
 1630         req->SGLOffset1 = 0;
 1631         req->SGLOffset2 = 0;
 1632         req->SGLOffset3 = 0;
 1633         req->SkipCount = 0;
 1634         req->DataLength = csio->dxfer_len;
 1635         req->BidirectionalDataLength = 0;
 1636         req->IoFlags = csio->cdb_len;
 1637         req->EEDPFlags = 0;
 1638 
 1639         /* Note: BiDirectional transfers are not supported */
 1640         switch (csio->ccb_h.flags & CAM_DIR_MASK) {
 1641         case CAM_DIR_IN:
 1642                 req->Control = MPI2_SCSIIO_CONTROL_READ;
 1643                 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
 1644                 break;
 1645         case CAM_DIR_OUT:
 1646                 req->Control = MPI2_SCSIIO_CONTROL_WRITE;
 1647                 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
 1648                 break;
 1649         case CAM_DIR_NONE:
 1650         default:
 1651                 req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
 1652                 break;
 1653         }
 1654 
 1655         /*
 1656          * It looks like the hardware doesn't require an explicit tag
 1657          * number for each transaction.  SAM Task Management not supported
 1658          * at the moment.
 1659          */
 1660         switch (csio->tag_action) {
 1661         case MSG_HEAD_OF_Q_TAG:
 1662                 req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
 1663                 break;
 1664         case MSG_ORDERED_Q_TAG:
 1665                 req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
 1666                 break;
 1667         case MSG_ACA_TASK:
 1668                 req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
 1669                 break;
 1670         case CAM_TAG_ACTION_NONE:
 1671         case MSG_SIMPLE_Q_TAG:
 1672         default:
 1673                 req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
 1674                 break;
 1675         }
 1676         req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
 1677 
 1678         if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
 1679                 mps_free_command(sc, cm);
 1680                 ccb->ccb_h.status = CAM_LUN_INVALID;
 1681                 xpt_done(ccb);
 1682                 return;
 1683         }
 1684 
 1685         if (csio->ccb_h.flags & CAM_CDB_POINTER)
 1686                 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
 1687         else
 1688                 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0], csio->cdb_len);
 1689         req->IoFlags = csio->cdb_len;
 1690 
 1691         /*
 1692          * Check if EEDP is supported and enabled.  If it is then check if the
 1693          * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
 1694          * is formatted for EEDP support.  If all of this is true, set CDB up
 1695          * for EEDP transfer.
 1696          */
 1697         eedp_flags = op_code_prot[req->CDB.CDB32[0]];
 1698         if (sc->eedp_enabled && eedp_flags) {
 1699                 SLIST_FOREACH(lun, &targ->luns, lun_link) {
 1700                         if (lun->lun_id == csio->ccb_h.target_lun) {
 1701                                 break;
 1702                         }
 1703                 }
 1704 
 1705                 if ((lun != NULL) && (lun->eedp_formatted)) {
 1706                         req->EEDPBlockSize = lun->eedp_block_size;
 1707                         eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
 1708                             MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
 1709                             MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
 1710                         req->EEDPFlags = eedp_flags;
 1711 
 1712                         /*
 1713                          * If the CDB is shorter than 32 bytes, fill in the
 1714                          * Primary Reference Tag with the low 4 bytes of the
 1715                          * LBA.  If the CDB is 32 bytes, the tag fields are
 1716                          * already there.  Also set the protection bit.
 1717                          * FreeBSD currently does not support CDBs longer
 1718                          * than 16 bytes, but the code doesn't hurt and will
 1719                          * be here for the future.
 1719                          */
 1720                         if (csio->cdb_len != 32) {
 1721                                 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
 1722                                 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
 1723                                     PrimaryReferenceTag;
 1724                                 for (i = 0; i < 4; i++) {
 1725                                         *ref_tag_addr =
 1726                                             req->CDB.CDB32[lba_byte + i];
 1727                                         ref_tag_addr++;
 1728                                 }
 1729                                 req->CDB.EEDP32.PrimaryApplicationTagMask =
 1730                                     0xFFFF;
 1731                                 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
 1732                                     0x20;
 1733                         } else {
 1734                                 eedp_flags |=
 1735                                     MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
 1736                                 req->EEDPFlags = eedp_flags;
 1737                                 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
 1738                                     0x1F) | 0x20;
 1739                         }
 1740                 }
 1741         }
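              /*
               * Worked example (illustrative only, hypothetical values): for a
               * READ(10), lba_byte is 2, so CDB bytes 2..5 hold the 32-bit LBA.
               * With an LBA of 0x00012345 the loop above copies 00 01 23 45
               * into PrimaryReferenceTag, and (CDB32[1] & 0x1F) | 0x20 sets the
               * RDPROTECT/WRPROTECT field in CDB byte 1 to 001b while keeping
               * the low five bits intact.
               */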
 1742 
 1743         cm->cm_data = csio->data_ptr;
 1744         cm->cm_length = csio->dxfer_len;
 1745         cm->cm_sge = &req->SGL;
 1746         cm->cm_sglsize = (32 - 24) * 4;
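              /*
               * Illustrative note: assuming the usual 128-byte (32 dword) MPI2
               * request frame, with SGLOffset0 set to 24 dwords above this
               * leaves (32 - 24) * 4 = 32 bytes of embedded SGL space.
               */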
 1747         cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
 1748         cm->cm_desc.SCSIIO.DevHandle = targ->handle;
 1749         cm->cm_complete = mpssas_scsiio_complete;
 1750         cm->cm_complete_data = ccb;
 1751         cm->cm_targ = targ;
 1752         cm->cm_lun = csio->ccb_h.target_lun;
 1753         cm->cm_ccb = ccb;
 1754 
 1755         /*
 1756          * If HBA is a WD and the command is not for a retry, try to build a
 1757          * direct I/O message. If failed, or the command is for a retry, send
 1758          * the I/O to the IR volume itself.
 1759          */
 1760         if (sc->WD_valid_config) {
 1761                 if (ccb->ccb_h.status != MPS_WD_RETRY) {
 1762                         mpssas_direct_drive_io(sassc, cm, ccb);
 1763                 } else {
 1764                         ccb->ccb_h.status = CAM_REQ_INPROG;
 1765                 }
 1766         }
 1767 
 1768         callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
 1769            mpssas_scsiio_timeout, cm);
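              /*
               * Illustrative note: ccb_h.timeout is in milliseconds, so the
               * expression above converts it to callout ticks.  For example, a
               * 30000 ms CAM timeout with hz = 1000 arms the callout for
               * 30000 * 1000 / 1000 = 30000 ticks, i.e. 30 seconds.
               */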
 1770 
 1771         targ->issued++;
 1772         targ->outstanding++;
 1773         TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
 1774 
 1775         if ((sc->mps_debug & MPS_TRACE) != 0)
 1776                 mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
 1777                     __func__, cm, ccb, targ->outstanding);
 1778 
 1779         mps_map_command(sc, cm);
 1780         return;
 1781 }
 1782 
 1783 static void
 1784 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
 1785 {
 1786         MPI2_SCSI_IO_REPLY *rep;
 1787         union ccb *ccb;
 1788         struct ccb_scsiio *csio;
 1789         struct mpssas_softc *sassc;
 1790         struct scsi_vpd_supported_page_list *vpd_list = NULL;
 1791         u8 *TLR_bits, TLR_on;
 1792         int dir = 0, i;
 1793         u16 alloc_len;
 1794 
 1795         mps_dprint(sc, MPS_TRACE,
 1796             "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
 1797             __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
 1798             cm->cm_targ->outstanding);
 1799 
 1800         callout_stop(&cm->cm_callout);
 1801         KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
 1802 
 1803         sassc = sc->sassc;
 1804         ccb = cm->cm_complete_data;
 1805         csio = &ccb->csio;
 1806         rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
 1807         /*
 1808          * XXX KDM if the chain allocation fails, does it matter if we do
 1809          * the sync and unload here?  It is simpler to do it in every case,
 1810          * assuming it doesn't cause problems.
 1811          */
 1812         if (cm->cm_data != NULL) {
 1813                 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
 1814                         dir = BUS_DMASYNC_POSTREAD;
 1815                 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
 1816                         dir = BUS_DMASYNC_POSTWRITE;
 1817                 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
 1818                 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
 1819         }
 1820 
 1821         cm->cm_targ->completed++;
 1822         cm->cm_targ->outstanding--;
 1823         TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
 1824 
 1825         if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
 1826                 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
 1827                 if (cm->cm_reply != NULL)
 1828                         mpssas_log_command(cm,
 1829                             "completed timedout cm %p ccb %p during recovery "
 1830                             "ioc %x scsi %x state %x xfer %u\n",
 1831                             cm, cm->cm_ccb,
 1832                             rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
 1833                             rep->TransferCount);
 1834                 else
 1835                         mpssas_log_command(cm,
 1836                             "completed timedout cm %p ccb %p during recovery\n",
 1837                             cm, cm->cm_ccb);
 1838         } else if (cm->cm_targ->tm != NULL) {
 1839                 if (cm->cm_reply != NULL)
 1840                         mpssas_log_command(cm,
 1841                             "completed cm %p ccb %p during recovery "
 1842                             "ioc %x scsi %x state %x xfer %u\n",
 1843                             cm, cm->cm_ccb,
 1844                             rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
 1845                             rep->TransferCount);
 1846                 else
 1847                         mpssas_log_command(cm,
 1848                             "completed cm %p ccb %p during recovery\n",
 1849                             cm, cm->cm_ccb);
 1850         } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
 1851                 mpssas_log_command(cm,
 1852                     "reset completed cm %p ccb %p\n",
 1853                     cm, cm->cm_ccb);
 1854         }
 1855 
 1856         if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 1857                 /*
 1858                  * We ran into an error after we tried to map the command,
 1859                  * so we're getting a callback without queueing the command
 1860                  * to the hardware.  So we set the status here, and it will
 1861                  * be retained below.  We'll go through the "fast path",
 1862                  * because there can be no reply when we haven't actually
 1863                  * gone out to the hardware.
 1864                  */
 1865                 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
 1866 
 1867                 /*
 1868                  * Currently the only error included in the mask is
 1869                  * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
 1870                  * chain frames.  We need to freeze the queue until we get
 1871                  * a command that completed without this error, which will
 1872                  * hopefully have some chain frames attached that we can
 1873                  * use.  If we wanted to get smarter about it, we would
 1874                  * only unfreeze the queue in this condition when we're
 1875                  * sure that we're getting some chain frames back.  That's
 1876                  * probably unnecessary.
 1877                  */
 1878                 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
 1879                         xpt_freeze_simq(sassc->sim, 1);
 1880                         sassc->flags |= MPSSAS_QUEUE_FROZEN;
 1881                         mps_dprint(sc, MPS_INFO, "Error sending command, "
 1882                                    "freezing SIM queue\n");
 1883                 }
 1884         }
 1885 
 1886         /* Take the fast path to completion */
 1887         if (cm->cm_reply == NULL) {
 1888                 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
 1889                         if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
 1890                                 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
 1891                         else {
 1892                                 ccb->ccb_h.status = CAM_REQ_CMP;
 1893                                 ccb->csio.scsi_status = SCSI_STATUS_OK;
 1894                         }
 1895                         if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
 1896                                 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 1897                                 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
 1898                                 mps_dprint(sc, MPS_INFO,
 1899                                            "Unfreezing SIM queue\n");
 1900                         }
 1901                 }
 1902 
 1903                 /*
 1904                  * There are two scenarios where the status won't be
 1905                  * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
 1906                  * set, the second is in the MPS_FLAGS_DIAGRESET above.
 1907                  */
 1908                 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 1909                         /*
 1910                          * Freeze the dev queue so that commands are
 1911                          * executed in the correct order after error
 1912                          * recovery.
 1913                          */
 1914                         ccb->ccb_h.status |= CAM_DEV_QFRZN;
 1915                         xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
 1916                 }
 1917                 mps_free_command(sc, cm);
 1918                 xpt_done(ccb);
 1919                 return;
 1920         }
 1921 
 1922         if (sc->mps_debug & MPS_TRACE)
 1923                 mpssas_log_command(cm,
 1924                     "ioc %x scsi %x state %x xfer %u\n",
 1925                     rep->IOCStatus, rep->SCSIStatus,
 1926                     rep->SCSIState, rep->TransferCount);
 1927 
 1928         /*
 1929          * If this is a Direct Drive I/O, reissue the I/O to the original IR
 1930          * Volume if an error occurred (normal I/O retry).  Use the original
 1931          * CCB, but set a flag that this will be a retry so that it's sent to
 1932          * the original volume.  Free the command but reuse the CCB.
 1933          */
 1934         if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
 1935                 mps_free_command(sc, cm);
 1936                 ccb->ccb_h.status = MPS_WD_RETRY;
 1937                 mpssas_action_scsiio(sassc, ccb);
 1938                 return;
 1939         }
 1940 
 1941         switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
 1942         case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
 1943                 csio->resid = cm->cm_length - rep->TransferCount;
 1944                 /* FALLTHROUGH */
 1945         case MPI2_IOCSTATUS_SUCCESS:
 1946         case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
 1947 
 1948                 if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
 1949                     MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
 1950                         mpssas_log_command(cm, "recovered error\n");
 1951 
 1952                 /* Completion failed at the transport level. */
 1953                 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
 1954                     MPI2_SCSI_STATE_TERMINATED)) {
 1955                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1956                         break;
 1957                 }
 1958 
 1959                 /* In a modern packetized environment, an autosense failure
 1960                  * implies that there's not much else that can be done to
 1961                  * recover the command.
 1962                  */
 1963                 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
 1964                         ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
 1965                         break;
 1966                 }
 1967 
 1968                 /*
 1969                  * CAM doesn't care about SAS Response Info data, but if this
 1970                  * state is set, check whether TLR should be done.  If not,
 1971                  * clear the TLR_bits for the target.
 1972                  */
 1973                 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
 1974                     ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
 1975                     MPS_SCSI_RI_INVALID_FRAME)) {
 1976                         sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
 1977                             (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
 1978                 }
 1979 
 1980                 /*
 1981                  * Intentionally override the normal SCSI status reporting
 1982                  * for these two cases.  These are likely to happen in a
 1983                  * multi-initiator environment, and we want to make sure that
 1984                  * CAM retries these commands rather than fail them.
 1985                  */
 1986                 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
 1987                     (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
 1988                         ccb->ccb_h.status = CAM_REQ_ABORTED;
 1989                         break;
 1990                 }
 1991 
 1992                 /* Handle normal status and sense */
 1993                 csio->scsi_status = rep->SCSIStatus;
 1994                 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
 1995                         ccb->ccb_h.status = CAM_REQ_CMP;
 1996                 else
 1997                         ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
 1998 
 1999                 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
 2000                         int sense_len, returned_sense_len;
 2001 
 2002                         returned_sense_len = min(rep->SenseCount,
 2003                             sizeof(struct scsi_sense_data));
 2004                         if (returned_sense_len < ccb->csio.sense_len)
 2005                                 ccb->csio.sense_resid = ccb->csio.sense_len -
 2006                                         returned_sense_len;
 2007                         else
 2008                                 ccb->csio.sense_resid = 0;
 2009 
 2010                         sense_len = min(returned_sense_len,
 2011                             ccb->csio.sense_len - ccb->csio.sense_resid);
 2012                         bzero(&ccb->csio.sense_data,
 2013                               sizeof(ccb->csio.sense_data));
 2014                         bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
 2015                         ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
 2016                 }
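                      /*
                       * Illustrative note: if the IOC returned 18 bytes of sense
                       * data and the CCB asked for 32, sense_resid becomes
                       * 32 - 18 = 14 and only the 18 valid bytes are copied over
                       * the zeroed sense buffer.
                       */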
 2017 
 2018                 /*
 2019                  * Check if this is an INQUIRY command.  If it's a VPD inquiry,
 2020                  * and it's page code 0 (Supported Page List), and there is
 2021                  * inquiry data, and this is for a sequential access device, and
 2022                  * the device is an SSP target, and TLR is supported by the
 2023                  * controller, turn the TLR_bits value ON if page 0x90 is
 2024                  * supported.
 2025                  */
 2026                 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
 2027                     (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
 2028                     (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
 2029                     (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
 2030                     T_SEQUENTIAL) && (sc->control_TLR) &&
 2031                     (sc->mapping_table[csio->ccb_h.target_id].device_info &
 2032                     MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
 2033                         vpd_list = (struct scsi_vpd_supported_page_list *)
 2034                             csio->data_ptr;
 2035                         TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
 2036                             TLR_bits;
 2037                         *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
 2038                         TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
 2039                         alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
 2040                             csio->cdb_io.cdb_bytes[4];
 2041                         for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
 2042                                 if (vpd_list->list[i] == 0x90) {
 2043                                         *TLR_bits = TLR_on;
 2044                                         break;
 2045                                 }
 2046                         }
 2047                 }
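                      /*
                       * Illustrative note: alloc_len above is the INQUIRY
                       * allocation length rebuilt from CDB bytes 3 and 4, e.g.
                       * bytes 00 FC give (0x00 << 8) + 0xFC = 252.  Page 0x90
                       * in the returned list is the Protocol Specific Logical
                       * Unit Information VPD page, which is what the TLR check
                       * above keys on.
                       */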
 2048                 break;
 2049         case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
 2050         case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
 2051                 /*
 2052                  * If devinfo is 0 this will be a volume.  In that case don't
 2053                  * tell CAM that the volume is not there.  We want volumes to
 2054                  * be enumerated until they are deleted/removed, not just
 2055                  * failed.
 2056                  */
 2057                 if (cm->cm_targ->devinfo == 0)
 2058                         ccb->ccb_h.status = CAM_REQ_CMP;
 2059                 else
 2060                         ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2061                 break;
 2062         case MPI2_IOCSTATUS_INVALID_SGL:
 2063                 mps_print_scsiio_cmd(sc, cm);
 2064                 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
 2065                 break;
 2066         case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
 2067                 /*
 2068                  * This is one of the responses that comes back when an I/O
 2069                  * has been aborted.  If it is because of a timeout that we
 2070                  * initiated, just set the status to CAM_CMD_TIMEOUT.
 2071                  * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
 2072                  * command is the same (it gets retried, subject to the
 2073                  * retry counter), the only difference is what gets printed
 2074                  * on the console.
 2075                  */
 2076                 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
 2077                         ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 2078                 else
 2079                         ccb->ccb_h.status = CAM_REQ_ABORTED;
 2080                 break;
 2081         case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
 2082                 /* resid is ignored for this condition */
 2083                 csio->resid = 0;
 2084                 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
 2085                 break;
 2086         case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
 2087         case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
 2088                 /*
 2089                  * Since these are generally external (i.e. hopefully
 2090                  * transient transport-related) errors, retry these without
 2091                  * decrementing the retry count.
 2092                  */
 2093                 ccb->ccb_h.status = CAM_REQUEUE_REQ;
 2094                 mpssas_log_command(cm,
 2095                     "terminated ioc %x scsi %x state %x xfer %u\n",
 2096                     rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
 2097                     rep->TransferCount);
 2098                 break;
 2099         case MPI2_IOCSTATUS_INVALID_FUNCTION:
 2100         case MPI2_IOCSTATUS_INTERNAL_ERROR:
 2101         case MPI2_IOCSTATUS_INVALID_VPID:
 2102         case MPI2_IOCSTATUS_INVALID_FIELD:
 2103         case MPI2_IOCSTATUS_INVALID_STATE:
 2104         case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
 2105         case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
 2106         case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
 2107         case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
 2108         case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
 2109         default:
 2110                 mpssas_log_command(cm,
 2111                     "completed ioc %x scsi %x state %x xfer %u\n",
 2112                     rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
 2113                     rep->TransferCount);
 2114                 csio->resid = cm->cm_length;
 2115                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2116                 break;
 2117         }
 2118 
 2119         if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
 2120                 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 2121                 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
 2122                 mps_dprint(sc, MPS_INFO, "Command completed, "
 2123                            "unfreezing SIM queue\n");
 2124         }
 2125 
 2126         if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 2127                 ccb->ccb_h.status |= CAM_DEV_QFRZN;
 2128                 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
 2129         }
 2130 
 2131         mps_free_command(sc, cm);
 2132         xpt_done(ccb);
 2133 }
 2134 
 2135 static void
 2136 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
 2137     union ccb *ccb) {
 2138         pMpi2SCSIIORequest_t    pIO_req;
 2139         struct mps_softc        *sc = sassc->sc;
 2140         uint64_t                virtLBA;
 2141         uint32_t                physLBA, stripe_offset, stripe_unit;
 2142         uint32_t                io_size, column;
 2143         uint8_t                 *ptrLBA, lba_idx, physLBA_byte, *CDB;
 2144 
 2145         /*
 2146          * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
 2147          * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
 2148          * will be sent to the IR volume itself.  Since Read6 and Write6 are a
 2149          * bit different than the 10/16 CDBs, handle them separately.
 2150          */
 2151         pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
 2152         CDB = pIO_req->CDB.CDB32;
 2153 
 2154         /*
 2155          * Handle 6 byte CDBs.
 2156          */
 2157         if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
 2158             (CDB[0] == WRITE_6))) {
 2159                 /*
 2160                  * Get the transfer size in blocks.
 2161                  */
 2162                 io_size = (cm->cm_length >> sc->DD_block_exponent);
 2163 
 2164                 /*
 2165                  * Get virtual LBA given in the CDB.
 2166                  */
 2167                 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
 2168                     ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
 2169 
 2170                 /*
 2171                  * Check that LBA range for I/O does not exceed volume's
 2172                  * MaxLBA.
 2173                  */
 2174                 if ((virtLBA + (uint64_t)io_size - 1) <=
 2175                     sc->DD_max_lba) {
 2176                         /*
 2177                          * Check if the I/O crosses a stripe boundary.  If not,
 2178                          * translate the virtual LBA to a physical LBA and set
 2179                          * the DevHandle for the PhysDisk to be used.  If it
 2180                          * does cross a boundary, do normal I/O.  To get the
 2181                          * right DevHandle to use, get the map number for the
 2182                          * column, then use that map number to look up the
 2183                          * DevHandle of the PhysDisk.
 2184                          */
 2185                         stripe_offset = (uint32_t)virtLBA &
 2186                             (sc->DD_stripe_size - 1);
 2187                         if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
 2188                                 physLBA = (uint32_t)virtLBA >>
 2189                                     sc->DD_stripe_exponent;
 2190                                 stripe_unit = physLBA / sc->DD_num_phys_disks;
 2191                                 column = physLBA % sc->DD_num_phys_disks;
 2192                                 pIO_req->DevHandle =
 2193                                     sc->DD_column_map[column].dev_handle;
 2194                                 cm->cm_desc.SCSIIO.DevHandle =
 2195                                     pIO_req->DevHandle;
 2196 
 2197                                 physLBA = (stripe_unit <<
 2198                                     sc->DD_stripe_exponent) + stripe_offset;
 2199                                 ptrLBA = &pIO_req->CDB.CDB32[1];
 2200                                 physLBA_byte = (uint8_t)(physLBA >> 16);
 2201                                 *ptrLBA = physLBA_byte;
 2202                                 ptrLBA = &pIO_req->CDB.CDB32[2];
 2203                                 physLBA_byte = (uint8_t)(physLBA >> 8);
 2204                                 *ptrLBA = physLBA_byte;
 2205                                 ptrLBA = &pIO_req->CDB.CDB32[3];
 2206                                 physLBA_byte = (uint8_t)physLBA;
 2207                                 *ptrLBA = physLBA_byte;
 2208 
 2209                                 /*
 2210                                  * Set flag that Direct Drive I/O is
 2211                                  * being done.
 2212                                  */
 2213                                 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
 2214                         }
 2215                 }
 2216                 return;
 2217         }
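              /*
               * Worked example for the translation above (hypothetical volume
               * geometry, not taken from the driver): with DD_stripe_size = 128
               * blocks (DD_stripe_exponent = 7) and DD_num_phys_disks = 2, a
               * READ(6) at virtLBA 300 for io_size 8 gives stripe_offset =
               * 300 & 127 = 44; 44 + 8 <= 128, so the I/O stays inside one
               * stripe.  physLBA = 300 >> 7 = 2, stripe_unit = 2 / 2 = 1,
               * column = 2 % 2 = 0, and the rewritten LBA becomes
               * (1 << 7) + 44 = 172 on the column-0 PhysDisk.
               */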
 2218 
 2219         /*
 2220          * Handle 10 or 16 byte CDBs.
 2221          */
 2222         if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
 2223             (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
 2224             (CDB[0] == WRITE_16))) {
 2225                 /*
 2226                  * For 16-byte CDBs, verify that the upper 4 bytes of the CDB
 2227                  * are 0.  If not, this is accessing beyond 2 TB, so handle it
 2228                  * in the else section.  10-byte CDBs are OK.
 2229                  */
 2230                 if ((CDB[0] < READ_16) ||
 2231                     !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
 2232                         /*
 2233                          * Get the transfer size in blocks.
 2234                          */
 2235                         io_size = (cm->cm_length >> sc->DD_block_exponent);
 2236 
 2237                         /*
 2238                          * Get virtual LBA.  Point to correct lower 4 bytes of
 2239                          * LBA in the CDB depending on command.
 2240                          */
 2241                         lba_idx = (CDB[0] < READ_16) ? 2 : 6;
 2242                         virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
 2243                             ((uint64_t)CDB[lba_idx + 1] << 16) |
 2244                             ((uint64_t)CDB[lba_idx + 2] << 8) |
 2245                             (uint64_t)CDB[lba_idx + 3];
 2246 
 2247                         /*
 2248                          * Check that LBA range for I/O does not exceed volume's
 2249                          * MaxLBA.
 2250                          */
 2251                         if ((virtLBA + (uint64_t)io_size - 1) <=
 2252                             sc->DD_max_lba) {
 2253                                 /*
 2254                                  * Check if the I/O crosses a stripe boundary.
 2255                                  * If not, translate the virtual LBA to a
 2256                                  * physical LBA and set the DevHandle for the
 2257                                  * PhysDisk to be used.  If it does cross a
 2258                                  * boundary, do normal I/O.  To get the right
 2259                                  * DevHandle to use, get the map number for the
 2260                                  * column, then use that map number to look up
 2261                                  * the DevHandle of the PhysDisk.
 2262                                  */
 2263                                 stripe_offset = (uint32_t)virtLBA &
 2264                                     (sc->DD_stripe_size - 1);
 2265                                 if ((stripe_offset + io_size) <=
 2266                                     sc->DD_stripe_size) {
 2267                                         physLBA = (uint32_t)virtLBA >>
 2268                                             sc->DD_stripe_exponent;
 2269                                         stripe_unit = physLBA /
 2270                                             sc->DD_num_phys_disks;
 2271                                         column = physLBA %
 2272                                             sc->DD_num_phys_disks;
 2273                                         pIO_req->DevHandle =
 2274                                             sc->DD_column_map[column].
 2275                                             dev_handle;
 2276                                         cm->cm_desc.SCSIIO.DevHandle =
 2277                                             pIO_req->DevHandle;
 2278 
 2279                                         physLBA = (stripe_unit <<
 2280                                             sc->DD_stripe_exponent) +
 2281                                             stripe_offset;
 2282                                         ptrLBA =
 2283                                             &pIO_req->CDB.CDB32[lba_idx];
 2284                                         physLBA_byte = (uint8_t)(physLBA >> 24);
 2285                                         *ptrLBA = physLBA_byte;
 2286                                         ptrLBA =
 2287                                             &pIO_req->CDB.CDB32[lba_idx + 1];
 2288                                         physLBA_byte = (uint8_t)(physLBA >> 16);
 2289                                         *ptrLBA = physLBA_byte;
 2290                                         ptrLBA =
 2291                                             &pIO_req->CDB.CDB32[lba_idx + 2];
 2292                                         physLBA_byte = (uint8_t)(physLBA >> 8);
 2293                                         *ptrLBA = physLBA_byte;
 2294                                         ptrLBA =
 2295                                             &pIO_req->CDB.CDB32[lba_idx + 3];
 2296                                         physLBA_byte = (uint8_t)physLBA;
 2297                                         *ptrLBA = physLBA_byte;
 2298 
 2299                                         /*
 2300                                          * Set flag that Direct Drive I/O is
 2301                                          * being done.
 2302                                          */
 2303                                         cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
 2304                                 }
 2305                         }
 2306                 } else {
 2307                         /*
 2308                          * 16-byte CDB and the upper 4 bytes of the CDB are not
 2309                          * 0.  Get the transfer size in blocks.
 2310                          */
 2311                         io_size = (cm->cm_length >> sc->DD_block_exponent);
 2312 
 2313                         /*
 2314                          * Get virtual LBA.
 2315                          */
 2316                         virtLBA = ((uint64_t)CDB[2] << 56) |
 2317                             ((uint64_t)CDB[3] << 48) |
 2318                             ((uint64_t)CDB[4] << 40) |
 2319                             ((uint64_t)CDB[5] << 32) |
 2320                             ((uint64_t)CDB[6] << 24) |
 2321                             ((uint64_t)CDB[7] << 16) |
 2322                             ((uint64_t)CDB[8] << 8) |
 2323                             (uint64_t)CDB[9];
 2324 
 2325                         /*
 2326                          * Check that LBA range for I/O does not exceed volume's
 2327                          * MaxLBA.
 2328                          */
 2329                         if ((virtLBA + (uint64_t)io_size - 1) <=
 2330                             sc->DD_max_lba) {
 2331                                 /*
 2332                                  * Check if the I/O crosses a stripe boundary.
 2333                                  * If not, translate the virtual LBA to a
 2334                                  * physical LBA and set the DevHandle for the
 2335                                  * PhysDisk to be used.  If it does cross a
 2336                                  * boundary, do normal I/O.  To get the right
 2337                                  * DevHandle to use, get the map number for the
 2338                                  * column, then use that map number to look up
 2339                                  * the DevHandle of the PhysDisk.
 2340                                  */
 2341                                 stripe_offset = (uint32_t)virtLBA &
 2342                                     (sc->DD_stripe_size - 1);
 2343                                 if ((stripe_offset + io_size) <=
 2344                                     sc->DD_stripe_size) {
 2345                                         physLBA = (uint32_t)(virtLBA >>
 2346                                             sc->DD_stripe_exponent);
 2347                                         stripe_unit = physLBA /
 2348                                             sc->DD_num_phys_disks;
 2349                                         column = physLBA %
 2350                                             sc->DD_num_phys_disks;
 2351                                         pIO_req->DevHandle =
 2352                                             sc->DD_column_map[column].
 2353                                             dev_handle;
 2354                                         cm->cm_desc.SCSIIO.DevHandle =
 2355                                             pIO_req->DevHandle;
 2356 
 2357                                         physLBA = (stripe_unit <<
 2358                                             sc->DD_stripe_exponent) +
 2359                                             stripe_offset;
 2360 
 2361                                         /*
 2362                                          * Set upper 4 bytes of LBA to 0.  We
 2363                                          * assume that the phys disks are less
 2364                                          * than 2 TB in size.  Then, set the
 2365                                          * lower 4 bytes.
 2366                                          */
 2367                                         pIO_req->CDB.CDB32[2] = 0;
 2368                                         pIO_req->CDB.CDB32[3] = 0;
 2369                                         pIO_req->CDB.CDB32[4] = 0;
 2370                                         pIO_req->CDB.CDB32[5] = 0;
 2371                                         ptrLBA = &pIO_req->CDB.CDB32[6];
 2372                                         physLBA_byte = (uint8_t)(physLBA >> 24);
 2373                                         *ptrLBA = physLBA_byte;
 2374                                         ptrLBA = &pIO_req->CDB.CDB32[7];
 2375                                         physLBA_byte = (uint8_t)(physLBA >> 16);
 2376                                         *ptrLBA = physLBA_byte;
 2377                                         ptrLBA = &pIO_req->CDB.CDB32[8];
 2378                                         physLBA_byte = (uint8_t)(physLBA >> 8);
 2379                                         *ptrLBA = physLBA_byte;
 2380                                         ptrLBA = &pIO_req->CDB.CDB32[9];
 2381                                         physLBA_byte = (uint8_t)physLBA;
 2382                                         *ptrLBA = physLBA_byte;
 2383 
 2384                                         /*
 2385                                          * Set flag that Direct Drive I/O is
 2386                                          * being done.
 2387                                          */
 2388                                         cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
 2389                                 }
 2390                         }
 2391                 }
 2392         }
 2393 }
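      /*
       * Minimal standalone sketch of the RAID0 mapping used by
       * mpssas_direct_drive_io() above (illustrative only; the function, its
       * name, and the dd_geom struct are hypothetical and not part of the
       * driver).  It reproduces the stripe_offset / stripe_unit / column
       * arithmetic for a virtual LBA that does not cross a stripe boundary.
       */
      #if 0   /* editorial example, not compiled into the driver */
      struct dd_geom {
              uint32_t stripe_exponent;       /* stripe size == 1 << exponent blocks */
              uint32_t num_phys_disks;
      };

      static int
      dd_map_lba(const struct dd_geom *g, uint64_t virt_lba, uint32_t io_size,
          uint32_t *column, uint32_t *phys_lba)
      {
              uint32_t stripe_size = 1u << g->stripe_exponent;
              uint32_t stripe_offset = (uint32_t)virt_lba & (stripe_size - 1);
              uint32_t stripe_no, stripe_unit;

              if (stripe_offset + io_size > stripe_size)
                      return (-1);    /* crosses a stripe; caller does normal I/O */

              stripe_no = (uint32_t)(virt_lba >> g->stripe_exponent);
              stripe_unit = stripe_no / g->num_phys_disks;
              *column = stripe_no % g->num_phys_disks;
              *phys_lba = (stripe_unit << g->stripe_exponent) + stripe_offset;
              return (0);
      }
      #endif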
 2394 
 2395 #if __FreeBSD_version >= 900026
 2396 static void
 2397 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
 2398 {
 2399         MPI2_SMP_PASSTHROUGH_REPLY *rpl;
 2400         MPI2_SMP_PASSTHROUGH_REQUEST *req;
 2401         uint64_t sasaddr;
 2402         union ccb *ccb;
 2403 
 2404         ccb = cm->cm_complete_data;
 2405 
 2406         /*
 2407          * Currently there should be no way we can hit this case.  It only
 2408          * happens when we have a failure to allocate chain frames, and SMP
 2409          * commands require two S/G elements only.  That should be handled
 2410          * in the standard request size.
 2411          */
 2412         if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 2413                 mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
 2414                            __func__, cm->cm_flags);
 2415                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2416                 goto bailout;
 2417         }
 2418 
 2419         rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
 2420         if (rpl == NULL) {
 2421                 mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
 2422                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2423                 goto bailout;
 2424         }
 2425 
 2426         req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
 2427         sasaddr = le32toh(req->SASAddress.Low);
 2428         sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
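              /*
               * Illustrative note: the 64-bit SAS address is reassembled from
               * the two little-endian 32-bit halves of the request, e.g. High
               * 0x5000c500 and Low 0x12345678 yield 0x5000c50012345678.
               */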
 2429 
 2430         if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
 2431             rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
 2432                 mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
 2433                     __func__, rpl->IOCStatus, rpl->SASStatus);
 2434                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2435                 goto bailout;
 2436         }
 2437 
 2438         mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
 2439                    "%#jx completed successfully\n", __func__,
 2440                    (uintmax_t)sasaddr);
 2441 
 2442         if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
 2443                 ccb->ccb_h.status = CAM_REQ_CMP;
 2444         else
 2445                 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
 2446 
 2447 bailout:
 2448         /*
 2449          * We sync in both directions because we had DMAs in the S/G list
 2450          * in both directions.
 2451          */
 2452         bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
 2453                         BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2454         bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
 2455         mps_free_command(sc, cm);
 2456         xpt_done(ccb);
 2457 }
 2458 
 2459 static void
 2460 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
 2461 {
 2462         struct mps_command *cm;
 2463         uint8_t *request, *response;
 2464         MPI2_SMP_PASSTHROUGH_REQUEST *req;
 2465         struct mps_softc *sc;
 2466         int error;
 2467 
 2468         sc = sassc->sc;
 2469         error = 0;
 2470 
 2471         /*
 2472          * XXX We don't yet support physical addresses here.
 2473          */
 2474         if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
 2475                 mps_printf(sc, "%s: physical addresses not supported\n",
 2476                            __func__);
 2477                 ccb->ccb_h.status = CAM_REQ_INVALID;
 2478                 xpt_done(ccb);
 2479                 return;
 2480         }
 2481 
 2482         /*
 2483          * If the user wants to send an S/G list, check to make sure they
 2484          * have single buffers.
 2485          */
 2486         if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
 2487                 /*
 2488                  * The chip does not support more than one buffer for the
 2489                  * request or response.
 2490                  */
 2491                 if ((ccb->smpio.smp_request_sglist_cnt > 1)
 2492                   || (ccb->smpio.smp_response_sglist_cnt > 1)) {
 2493                         mps_printf(sc, "%s: multiple request or response "
 2494                                    "buffer segments not supported for SMP\n",
 2495                                    __func__);
 2496                         ccb->ccb_h.status = CAM_REQ_INVALID;
 2497                         xpt_done(ccb);
 2498                         return;
 2499                 }
 2500 
 2501                 /*
 2502                  * The CAM_SCATTER_VALID flag was originally implemented
 2503                  * for the XPT_SCSI_IO CCB, which only has one data pointer.
 2504                  * We have two.  So, just take that flag to mean that we
 2505                  * might have S/G lists, and look at the S/G segment count
 2506                  * to figure out whether that is the case for each individual
 2507                  * buffer.
 2508                  */
 2509                 if (ccb->smpio.smp_request_sglist_cnt != 0) {
 2510                         bus_dma_segment_t *req_sg;
 2511 
 2512                         req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
 2513                         request = (uint8_t *)req_sg[0].ds_addr;
 2514                 } else
 2515                         request = ccb->smpio.smp_request;
 2516 
 2517                 if (ccb->smpio.smp_response_sglist_cnt != 0) {
 2518                         bus_dma_segment_t *rsp_sg;
 2519 
 2520                         rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
 2521                         response = (uint8_t *)rsp_sg[0].ds_addr;
 2522                 } else
 2523                         response = ccb->smpio.smp_response;
 2524         } else {
 2525                 request = ccb->smpio.smp_request;
 2526                 response = ccb->smpio.smp_response;
 2527         }
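              /*
               * At this point request and response are kernel virtual
               * addresses of single contiguous buffers, whether they came
               * straight from the CCB or from the first (and only) element
               * of each S/G list.
               */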
 2528 
 2529         cm = mps_alloc_command(sc);
 2530         if (cm == NULL) {
 2531                 mps_printf(sc, "%s: cannot allocate command\n", __func__);
 2532                 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2533                 xpt_done(ccb);
 2534                 return;
 2535         }
 2536 
 2537         req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
 2538         bzero(req, sizeof(*req));
 2539         req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
 2540 
 2541         /* Allow the chip to use any route to this SAS address. */
 2542         req->PhysicalPort = 0xff;
 2543 
 2544         req->RequestDataLength = ccb->smpio.smp_request_len;
 2545         req->SGLFlags =
 2546             MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
 2547 
 2548         mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
 2549                    "address %#jx\n", __func__, (uintmax_t)sasaddr);
 2550 
 2551         mpi_init_sge(cm, req, &req->SGL);
 2552 
 2553         /*
 2554          * Set up a uio to pass into mps_map_command().  This allows us to
 2555          * do one map command, and one busdma call in there.
 2556          */
 2557         cm->cm_uio.uio_iov = cm->cm_iovec;
 2558         cm->cm_uio.uio_iovcnt = 2;
 2559         cm->cm_uio.uio_segflg = UIO_SYSSPACE;
 2560 
 2561         /*
 2562          * The read/write flag isn't used by busdma, but set it just in
 2563          * case.  This isn't exactly accurate, either, since we're going in
 2564          * both directions.
 2565          */
 2566         cm->cm_uio.uio_rw = UIO_WRITE;
 2567 
 2568         cm->cm_iovec[0].iov_base = request;
 2569         cm->cm_iovec[0].iov_len = req->RequestDataLength;
 2570         cm->cm_iovec[1].iov_base = response;
 2571         cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
 2572 
 2573         cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
 2574                                cm->cm_iovec[1].iov_len;
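              /*
               * uio_resid covers both iovecs, so the single busdma call made
               * by mps_map_command() maps the request and response buffers
               * together.
               */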
 2575 
 2576         /*
 2577          * Trigger a warning message in mps_data_cb() for the user if we
 2578          * wind up exceeding two S/G segments.  The chip expects one
 2579          * segment for the request and another for the response.
 2580          */
 2581         cm->cm_max_segs = 2;
 2582 
 2583         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 2584         cm->cm_complete = mpssas_smpio_complete;
 2585         cm->cm_complete_data = ccb;
 2586 
 2587         /*
 2588          * Tell the mapping code that we're using a uio, and that this is
 2589          * an SMP passthrough request.  There is a little special-case
 2590          * logic there (in mps_data_cb()) to handle the bidirectional
 2591          * transfer.
 2592          */
 2593         cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
 2594                         MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
 2595 
 2596         /* The chip data format is little endian. */
 2597         req->SASAddress.High = htole32(sasaddr >> 32);
 2598         req->SASAddress.Low = htole32(sasaddr);
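              /*
               * The 64-bit SAS address is split into two 32-bit little-endian
               * halves here; mpssas_smpio_complete() reassembles it with
               * le32toh() when logging the completed request.
               */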
 2599 
 2600         /*
 2601          * XXX Note that we don't have a timeout/abort mechanism here.
 2602          * From the manual, it looks like task management requests only
 2603          * work for SCSI IO and SATA passthrough requests.  We may need to
 2604          * have a mechanism to retry requests in the event of a chip reset
 2605          * at least.  Hopefully the chip will ensure that any errors short
 2606          * of that are relayed back to the driver.
 2607          */
 2608         error = mps_map_command(sc, cm);
 2609         if ((error != 0) && (error != EINPROGRESS)) {
 2610                 mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
 2611                            __func__, error);
 2612                 goto bailout_error;
 2613         }
 2614 
 2615         return;
 2616 
 2617 bailout_error:
 2618         mps_free_command(sc, cm);
 2619         ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2620         xpt_done(ccb);
 2621         return;
 2622 
 2623 }
 2624 
 2625 static void
 2626 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
 2627 {
 2628         struct mps_softc *sc;
 2629         struct mpssas_target *targ;
 2630         uint64_t sasaddr = 0;
 2631 
 2632         sc = sassc->sc;
 2633 
 2634         /*
 2635          * Make sure the target exists.
 2636          */
 2637         targ = &sassc->targets[ccb->ccb_h.target_id];
 2638         if (targ->handle == 0x0) {
 2639                 mps_printf(sc, "%s: target %d does not exist!\n", __func__,
 2640                            ccb->ccb_h.target_id);
 2641                 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
 2642                 xpt_done(ccb);
 2643                 return;
 2644         }
 2645 
 2646         /*
 2647          * If this device has an embedded SMP target, we'll talk to it
 2648          * directly; otherwise we need to figure out the address of the
 2649          * expander it is attached to.
 2650          */
 2651         if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
 2652                 sasaddr = targ->sasaddr;
 2653 
 2654         /*
 2655          * If we don't have a SAS address for the expander yet, try
 2656          * grabbing it from the page 0x83 information cached in the
 2657          * transport layer for this target.  LSI expanders report the
 2658          * expander SAS address as the port-associated SAS address in
 2659          * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
 2660          * 0x83.
 2661          *
 2662          * XXX KDM disable this for now, but leave it commented out so that
 2663          * it is obvious that this is another possible way to get the SAS
 2664          * address.
 2665          *
 2666          * The parent handle method below is a little more reliable, and
 2667          * the other benefit is that it works for devices other than SES
 2668          * devices.  So you can send a SMP request to a da(4) device and it
 2669          * will get routed to the expander that device is attached to.
 2670          * (Assuming the da(4) device doesn't contain an SMP target...)
 2671          */
 2672 #if 0
 2673         if (sasaddr == 0)
 2674                 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
 2675 #endif
 2676 
 2677         /*
 2678          * If we still don't have a SAS address for the expander, look for
 2679          * the parent device of this device, which is probably the expander.
 2680          */
 2681         if (sasaddr == 0) {
 2682 #ifdef OLD_MPS_PROBE
 2683                 struct mpssas_target *parent_target;
 2684 #endif
 2685 
 2686                 if (targ->parent_handle == 0x0) {
 2687                         mps_printf(sc, "%s: handle %d does not have a valid "
 2688                                    "parent handle!\n", __func__, targ->handle);
 2689                         ccb->ccb_h.status = CAM_REQ_INVALID;
 2690                         goto bailout;
 2691                 }
 2692 #ifdef OLD_MPS_PROBE
 2693                 parent_target = mpssas_find_target_by_handle(sassc, 0,
 2694                         targ->parent_handle);
 2695 
 2696                 if (parent_target == NULL) {
 2697                         mps_printf(sc, "%s: handle %d does not have a valid "
 2698                                    "parent target!\n", __func__, targ->handle);
 2699                         ccb->ccb_h.status = CAM_REQ_INVALID;
 2700                         goto bailout;
 2701                 }
 2702 
 2703                 if ((parent_target->devinfo &
 2704                      MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
 2705                         mps_printf(sc, "%s: handle %d parent %d does not "
 2706                                    "have an SMP target!\n", __func__,
 2707                                    targ->handle, parent_target->handle);
 2708                         ccb->ccb_h.status = CAM_REQ_INVALID;
 2709                         goto bailout;
 2710 
 2711                 }
 2712 
 2713                 sasaddr = parent_target->sasaddr;
 2714 #else /* OLD_MPS_PROBE */
 2715                 if ((targ->parent_devinfo &
 2716                      MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
 2717                         mps_printf(sc, "%s: handle %d parent %d does not "
 2718                                    "have an SMP target!\n", __func__,
 2719                                    targ->handle, targ->parent_handle);
 2720                         ccb->ccb_h.status = CAM_REQ_INVALID;
 2721                         goto bailout;
 2722 
 2723                 }
 2724                 if (targ->parent_sasaddr == 0x0) {
 2725                         mps_printf(sc, "%s: handle %d parent handle %d does "
 2726                                    "not have a valid SAS address!\n",
 2727                                    __func__, targ->handle, targ->parent_handle);
 2728                         ccb->ccb_h.status = CAM_REQ_INVALID;
 2729                         goto bailout;
 2730                 }
 2731 
 2732                 sasaddr = targ->parent_sasaddr;
 2733 #endif /* OLD_MPS_PROBE */
 2734 
 2735         }
 2736 
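              /*
               * To summarize the lookup order above: use the target's own SAS
               * address if it is itself an SMP target, then (disabled) the
               * Inquiry VPD page 0x83 address cached by the transport layer,
               * then the SAS address of the parent device, which should be
               * the expander.
               */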
 2737         if (sasaddr == 0) {
 2738                 mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
 2739                            __func__, targ->handle);
 2740                 ccb->ccb_h.status = CAM_REQ_INVALID;
 2741                 goto bailout;
 2742         }
 2743         mpssas_send_smpcmd(sassc, ccb, sasaddr);
 2744 
 2745         return;
 2746 
 2747 bailout:
 2748         xpt_done(ccb);
 2749 
 2750 }
 2751 #endif /* __FreeBSD_version >= 900026 */
 2752 
 2753 static void
 2754 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
 2755 {
 2756         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 2757         struct mps_softc *sc;
 2758         struct mps_command *tm;
 2759         struct mpssas_target *targ;
 2760 
 2761         mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
 2762         KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
 2763 
 2764         sc = sassc->sc;
 2765         tm = mps_alloc_command(sc);
 2766         if (tm == NULL) {
 2767                 mps_printf(sc, "command alloc failure in mpssas_action_resetdev\n");
 2768                 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2769                 xpt_done(ccb);
 2770                 return;
 2771         }
 2772 
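              /*
               * Build a SCSI task management request asking the IOC to reset
               * the target behind this device handle.
               */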
 2773         targ = &sassc->targets[ccb->ccb_h.target_id];
 2774         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 2775         req->DevHandle = targ->handle;
 2776         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 2777         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
 2778 
 2779         /* SAS Hard Link Reset / SATA Link Reset */
 2780         req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
 2781 
 2782         tm->cm_data = NULL;
 2783         tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
 2784         tm->cm_complete = mpssas_resetdev_complete;
 2785         tm->cm_complete_data = ccb;
 2786         tm->cm_targ = targ;
 2787         mps_map_command(sc, tm);
 2788 }
 2789 
 2790 static void
 2791 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
 2792 {
 2793         MPI2_SCSI_TASK_MANAGE_REPLY *resp;
 2794         union ccb *ccb;
 2795 
 2796         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 2797         KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
 2798 
 2799         resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 2800         ccb = tm->cm_complete_data;
 2801 
 2802         /*
 2803          * Currently there should be no way we can hit this case.  It only
 2804          * happens when we have a failure to allocate chain frames, and
 2805          * task management commands don't have S/G lists.
 2806          */
 2807         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 2808                 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 2809 
 2810                 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 2811 
 2812                 mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
 2813                            "This should not happen!\n", __func__, tm->cm_flags,
 2814                            req->DevHandle);
 2815                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2816                 goto bailout;
 2817         }
 2818 
 2819         kprintf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
 2820             resp->IOCStatus, resp->ResponseCode);
 2821 
 2822         if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
 2823                 ccb->ccb_h.status = CAM_REQ_CMP;
 2824                 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
 2825                     CAM_LUN_WILDCARD);
 2826         }
 2827         else
 2828                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2829 
 2830 bailout:
 2831 
 2832         mpssas_free_tm(sc, tm);
 2833         xpt_done(ccb);
 2834 }
 2835 
 2836 static void
 2837 mpssas_poll(struct cam_sim *sim)
 2838 {
 2839         struct mpssas_softc *sassc;
 2840 
 2841         sassc = cam_sim_softc(sim);
 2842 
 2843         if (sassc->sc->mps_debug & MPS_TRACE) {
 2844                 /* frequent debug messages during a panic just slow
 2845                  * everything down too much.
 2846                  */
 2847                 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
 2848                 sassc->sc->mps_debug &= ~MPS_TRACE;
 2849         }
 2850 
 2851         mps_intr_locked(sassc->sc);
 2852 }
 2853 
 2854 static void
 2855 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
 2856 {
 2857         struct mpssas_softc *sassc;
 2858         char path_str[64];
 2859 
 2860         if (done_ccb == NULL)
 2861                 return;
 2862 
 2863         sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
 2864 
 2865         KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
 2866 
 2867         xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
 2868         mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
 2869 
 2870         xpt_free_path(done_ccb->ccb_h.path);
 2871         xpt_free_ccb(done_ccb);
 2872 
 2873 #if __FreeBSD_version < 1000006
 2874         /*
 2875          * Before completing scan, get EEDP stuff for all of the existing
 2876          * targets.
 2877          */
 2878         mpssas_check_eedp(sassc);
 2879 #endif
 2880 
 2881 }
 2882 
 2883 /* thread to handle bus rescans */
 2884 static void
 2885 mpssas_scanner_thread(void *arg)
 2886 {
 2887         struct mpssas_softc *sassc;
 2888         struct mps_softc *sc;
 2889         union ccb       *ccb;
 2890 
 2891         sassc = (struct mpssas_softc *)arg;
 2892         sc = sassc->sc;
 2893 
 2894         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 2895 
 2896         mps_lock(sc);
 2897         for (;;) {
 2898                 /* Sleep for 1 second and check the queue status. */
 2899                 lksleep(&sassc->ccb_scanq, &sc->mps_lock, 0, "mps_scanq", 1 * hz);
 2900                 if (sassc->flags & MPSSAS_SHUTDOWN) {
 2901                         mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
 2902                         break;
 2903                 }
 2904 next_work:
 2905                 /* Get first work */
 2906                 ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
 2907                 if (ccb == NULL)
 2908                         continue;
 2909                 /* Got first work */
 2910                 TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
 2911                 xpt_action(ccb);
 2912                 if (sassc->flags & MPSSAS_SHUTDOWN) {
 2913                         mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
 2914                         break;
 2915                 }
 2916                 goto next_work;
 2917         }
 2918 
 2919         sassc->flags &= ~MPSSAS_SCANTHREAD;
 2920         wakeup(&sassc->flags);
 2921         mps_unlock(sc);
 2922         mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
 2923         mps_kproc_exit(0);
 2924 }
 2925 
 2926 static void
 2927 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
 2928 {
 2929         char path_str[64];
 2930 
 2931         mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
 2932 
 2933         KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
 2934 
 2935         if (ccb == NULL)
 2936                 return;
 2937 
 2938         xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
 2939         mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
 2940 
 2941         /* Prepare request */
 2942         ccb->ccb_h.ppriv_ptr1 = sassc;
 2943         ccb->ccb_h.cbfcnp = mpssas_rescan_done;
 2944         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
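              /*
               * Hand the CCB to the scanner thread; the wakeup() below pairs
               * with the lksleep() on ccb_scanq in mpssas_scanner_thread().
               */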
 2945         TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
 2946         wakeup(&sassc->ccb_scanq);
 2947 }
 2948 
 2949 #if __FreeBSD_version >= 1000006
 2950 static void
 2951 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
 2952              void *arg)
 2953 {
 2954         struct mps_softc *sc;
 2955 
 2956         sc = (struct mps_softc *)callback_arg;
 2957 
 2958         switch (code) {
 2959         case AC_ADVINFO_CHANGED: {
 2960                 struct mpssas_target *target;
 2961                 struct mpssas_softc *sassc;
 2962                 struct scsi_read_capacity_data_long rcap_buf;
 2963                 struct ccb_dev_advinfo cdai;
 2964                 struct mpssas_lun *lun;
 2965                 lun_id_t lunid;
 2966                 int found_lun;
 2967                 uintptr_t buftype;
 2968 
 2969                 buftype = (uintptr_t)arg;
 2970 
 2971                 found_lun = 0;
 2972                 sassc = sc->sassc;
 2973 
 2974                 /*
 2975                  * We're only interested in read capacity data changes.
 2976                  */
 2977                 if (buftype != CDAI_TYPE_RCAPLONG)
 2978                         break;
 2979 
 2980                 /*
 2981                  * We're only interested in devices that are attached to
 2982                  * this controller.
 2983                  */
 2984                 if (xpt_path_path_id(path) != sassc->sim->path_id)
 2985                         break;
 2986 
 2987                 /*
 2988                  * We should have a handle for this, but check to make sure.
 2989                  */
 2990                 target = &sassc->targets[xpt_path_target_id(path)];
 2991                 if (target->handle == 0)
 2992                         break;
 2993 
 2994                 lunid = xpt_path_lun_id(path);
 2995 
 2996                 SLIST_FOREACH(lun, &target->luns, lun_link) {
 2997                         if (lun->lun_id == lunid) {
 2998                                 found_lun = 1;
 2999                                 break;
 3000                         }
 3001                 }
 3002 
 3003                 if (found_lun == 0) {
 3004                         lun = kmalloc(sizeof(struct mpssas_lun), M_MPT2,
 3005                                      M_INTWAIT | M_ZERO);
 3006                         if (lun == NULL) {
 3007                                 mps_dprint(sc, MPS_FAULT, "Unable to alloc "
 3008                                            "LUN for EEDP support.\n");
 3009                                 break;
 3010                         }
 3011                         lun->lun_id = lunid;
 3012                         SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
 3013                 }
 3014 
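                      /*
                       * Ask CAM for the READ CAPACITY(16) data it has cached
                       * for this device; this advinfo query should not send a
                       * new command to the device itself.
                       */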
 3015                 bzero(&rcap_buf, sizeof(rcap_buf));
 3016                 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 3017                 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 3018                 cdai.ccb_h.flags = CAM_DIR_IN;
 3019                 cdai.buftype = CDAI_TYPE_RCAPLONG;
 3020                 cdai.flags = 0;
 3021                 cdai.bufsiz = sizeof(rcap_buf);
 3022                 cdai.buf = (uint8_t *)&rcap_buf;
 3023                 xpt_action((union ccb *)&cdai);
 3024                 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
 3025                         cam_release_devq(cdai.ccb_h.path,
 3026                                          0, 0, 0, FALSE);
 3027 
 3028                 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
 3029                  && (rcap_buf.prot & SRC16_PROT_EN)) {
 3030                         lun->eedp_formatted = TRUE;
 3031                         lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
 3032                 } else {
 3033                         lun->eedp_formatted = FALSE;
 3034                         lun->eedp_block_size = 0;
 3035                 }
 3036                 break;
 3037         }
 3038         default:
 3039                 break;
 3040         }
 3041 }
 3042 #else /* __FreeBSD_version >= 1000006 */
 3043 
 3044 static void
 3045 mpssas_check_eedp(struct mpssas_softc *sassc)
 3046 {
 3047         struct mps_softc *sc = sassc->sc;
 3048         struct ccb_scsiio *csio;
 3049         struct scsi_read_capacity_16 *scsi_cmd;
 3050         struct scsi_read_capacity_eedp *rcap_buf;
 3051         union ccb *ccb;
 3052         path_id_t pathid = cam_sim_path(sassc->sim);
 3053         target_id_t targetid;
 3054         lun_id_t lunid;
 3055         struct cam_periph *found_periph;
 3056         struct mpssas_target *target;
 3057         struct mpssas_lun *lun;
 3058         uint8_t found_lun;
 3059 
 3060         /*
 3061          * Issue a READ CAPACITY 16 command to each LUN of each target.  This
 3062          * info is used to determine if the LUN is formatted for EEDP support.
 3063          */
 3064         for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
 3065                 target = &sassc->targets[targetid];
 3066                 if (target->handle == 0x0) {
 3067                         continue;
 3068                 }
 3069 
 3070                 lunid = 0;
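                      /*
                       * Walk LUNs sequentially from 0; the loop ends when
                       * cam_periph_find() no longer returns a periph for the
                       * next LUN ID.
                       */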
 3071                 do {
 3072                         rcap_buf =
 3073                             kmalloc(sizeof(struct scsi_read_capacity_eedp),
 3074                             M_MPT2, M_INTWAIT | M_ZERO);
 3075                         if (rcap_buf == NULL) {
 3076                                 mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
 3077                                     "capacity buffer for EEDP support.\n");
 3078                                 return;
 3079                         }
 3080 
 3081                         ccb = kmalloc(sizeof(union ccb), M_TEMP,
 3082                             M_WAITOK | M_ZERO);
 3083 
 3084                         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
 3085                             pathid, targetid, lunid) != CAM_REQ_CMP) {
 3086                                 mps_dprint(sc, MPS_FAULT, "Unable to create "
 3087                                     "path for EEDP support\n");
 3088                                 kfree(rcap_buf, M_MPT2);
 3089                                 xpt_free_ccb(ccb);
 3090                                 return;
 3091                         }
 3092 
 3093                         /*
 3094                          * If a periph is returned, the LUN exists.  Create an
 3095                          * entry in the target's LUN list.
 3096                          */
 3097                         if ((found_periph = cam_periph_find(ccb->ccb_h.path,
 3098                             NULL)) != NULL) {
 3099                                 /*
 3100                                  * If LUN is already in list, don't create a new
 3101                                  * one.
 3102                                  */
 3103                                 found_lun = FALSE;
 3104                                 SLIST_FOREACH(lun, &target->luns, lun_link) {
 3105                                         if (lun->lun_id == lunid) {
 3106                                                 found_lun = TRUE;
 3107                                                 break;
 3108                                         }
 3109                                 }
 3110                                 if (!found_lun) {
 3111                                         lun = kmalloc(sizeof(struct mpssas_lun),
 3112                                             M_MPT2, M_INTWAIT | M_ZERO);
 3113                                         lun->lun_id = lunid;
 3114                                         SLIST_INSERT_HEAD(&target->luns, lun,
 3115                                             lun_link);
 3116                                 }
 3117                                 lunid++;
 3118 
 3119                                 /*
 3120                                  * Issue a READ CAPACITY 16 command for the LUN.
 3121                                  * The mpssas_read_cap_done function will load
 3122                                  * the read cap info into the LUN struct.
 3123                                  */
 3124                                 csio = &ccb->csio;
 3125                                 csio->ccb_h.func_code = XPT_SCSI_IO;
 3126                                 csio->ccb_h.flags = CAM_DIR_IN;
 3127                                 csio->ccb_h.retry_count = 4;
 3128                                 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
 3129                                 csio->ccb_h.timeout = 60000;
 3130                                 csio->data_ptr = (uint8_t *)rcap_buf;
 3131                                 csio->dxfer_len = sizeof(struct
 3132                                     scsi_read_capacity_eedp);
 3133                                 csio->sense_len = MPS_SENSE_LEN;
 3134                                 csio->cdb_len = sizeof(*scsi_cmd);
 3135                                 csio->tag_action = MSG_SIMPLE_Q_TAG;
 3136 
 3137                                 scsi_cmd = (struct scsi_read_capacity_16 *)
 3138                                     &csio->cdb_io.cdb_bytes;
 3139                                 bzero(scsi_cmd, sizeof(*scsi_cmd));
 3140                                 scsi_cmd->opcode = 0x9E;
 3141                                 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
 3142                                 ((uint8_t *)scsi_cmd)[13] = sizeof(struct
 3143                                     scsi_read_capacity_eedp);
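                                      /*
                                       * Byte 13 of the READ CAPACITY(16) CDB
                                       * is the least-significant byte of its
                                       * 4-byte allocation length; the buffer
                                       * is small enough that setting only
                                       * this byte is sufficient.
                                       */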
 3144 
 3145                                 /*
 3146                                  * Set the path, target and lun IDs for the READ
 3147                                  * CAPACITY request.
 3148                                  */
 3149                                 ccb->ccb_h.path_id =
 3150                                     xpt_path_path_id(ccb->ccb_h.path);
 3151                                 ccb->ccb_h.target_id =
 3152                                     xpt_path_target_id(ccb->ccb_h.path);
 3153                                 ccb->ccb_h.target_lun =
 3154                                     xpt_path_lun_id(ccb->ccb_h.path);
 3155 
 3156                                 ccb->ccb_h.ppriv_ptr1 = sassc;
 3157                                 xpt_action(ccb);
 3158                         } else {
 3159                                 kfree(rcap_buf, M_MPT2);
 3160                                 xpt_free_path(ccb->ccb_h.path);
 3161                                 xpt_free_ccb(ccb);
 3162                         }
 3163                 } while (found_periph);
 3164         }
 3165 }
 3166 
 3167 
 3168 static void
 3169 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
 3170 {
 3171         struct mpssas_softc *sassc;
 3172         struct mpssas_target *target;
 3173         struct mpssas_lun *lun;
 3174         struct scsi_read_capacity_eedp *rcap_buf;
 3175 
 3176         if (done_ccb == NULL)
 3177                 return;
 3178 
 3179         /*
 3180          * The driver needs to release the devq here because this
 3181          * SCSI command was generated internally by the driver.
 3182          * Currently there is only one place where the driver issues
 3183          * a SCSI command internally; if more internal SCSI commands
 3184          * are added in the future, they must also release the devq
 3185          * themselves, since those commands will not go back through
 3186          * cam_periph for completion.
 3187          */
 3188         if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
 3189                 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
 3190                 xpt_release_devq(done_ccb->ccb_h.path,
 3191                                  /*count*/ 1, /*run_queue*/TRUE);
 3192         }
 3193 
 3194         rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
 3195 
 3196         /*
 3197          * Get the LUN ID for the path and look it up in the LUN list for the
 3198          * target.
 3199          */
 3200         sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
 3201         target = &sassc->targets[done_ccb->ccb_h.target_id];
 3202         SLIST_FOREACH(lun, &target->luns, lun_link) {
 3203                 if (lun->lun_id != done_ccb->ccb_h.target_lun)
 3204                         continue;
 3205 
 3206                 /*
 3207                  * Got the LUN in the target's LUN list.  Fill it in
 3208                  * with EEDP info.  If the READ CAP 16 command had some
 3209                  * SCSI error (common if command is not supported), mark
 3210                  * the lun as not supporting EEDP and set the block size
 3211                  * to 0.
 3212                  */
 3213                 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 3214                  || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
 3215                         lun->eedp_formatted = FALSE;
 3216                         lun->eedp_block_size = 0;
 3217                         break;
 3218                 }
 3219 
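                      /*
                       * Bit 0 of the protect byte in the READ CAPACITY(16)
                       * data is PROT_EN; when set, the LUN was formatted with
                       * protection information and thus supports EEDP.
                       */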
 3220                 if (rcap_buf->protect & 0x01) {
 3221                         lun->eedp_formatted = TRUE;
 3222                         lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
 3223                 }
 3224                 break;
 3225         }
 3226 
 3227         /* Finished with this CCB and path. */
 3228         kfree(rcap_buf, M_MPT2);
 3229         xpt_free_path(done_ccb->ccb_h.path);
 3230         xpt_free_ccb(done_ccb);
 3231 }
 3232 #endif /* __FreeBSD_version >= 1000006 */
 3233 
 3234 int
 3235 mpssas_startup(struct mps_softc *sc)
 3236 {
 3237         struct mpssas_softc *sassc;
 3238 
 3239         /*
 3240          * Send the port enable message and set the wait_for_port_enable flag.
 3241          * This flag helps to keep the simq frozen until all discovery events
 3242          * are processed.
 3243          */
 3244         sassc = sc->sassc;
 3245         mpssas_startup_increment(sassc);
 3246         sc->wait_for_port_enable = 1;
 3247         mpssas_send_portenable(sc);
 3248         return (0);
 3249 }
 3250 
 3251 static int
 3252 mpssas_send_portenable(struct mps_softc *sc)
 3253 {
 3254         MPI2_PORT_ENABLE_REQUEST *request;
 3255         struct mps_command *cm;
 3256 
 3257         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 3258 
 3259         if ((cm = mps_alloc_command(sc)) == NULL)
 3260                 return (EBUSY);
 3261         request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
 3262         request->Function = MPI2_FUNCTION_PORT_ENABLE;
 3263         request->MsgFlags = 0;
 3264         request->VP_ID = 0;
 3265         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 3266         cm->cm_complete = mpssas_portenable_complete;
 3267         cm->cm_data = NULL;
 3268         cm->cm_sge = NULL;
 3269 
 3270         mps_map_command(sc, cm);
 3271         mps_dprint(sc, MPS_TRACE,
 3272             "mps_send_portenable finished cm %p req %p complete %p\n",
 3273             cm, cm->cm_req, cm->cm_complete);
 3274         return (0);
 3275 }
 3276 
 3277 static void
 3278 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
 3279 {
 3280         MPI2_PORT_ENABLE_REPLY *reply;
 3281         struct mpssas_softc *sassc;
 3282         struct mpssas_target *target;
 3283         int i;
 3284 
 3285         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 3286         sassc = sc->sassc;
 3287 
 3288         /*
 3289          * Currently there should be no way we can hit this case.  It only
 3290          * happens when we have a failure to allocate chain frames, and
 3291          * port enable commands don't have S/G lists.
 3292          */
 3293         if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 3294                 mps_printf(sc, "%s: cm_flags = %#x for port enable! "
 3295                            "This should not happen!\n", __func__, cm->cm_flags);
 3296         }
 3297 
 3298         reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
 3299         if (reply == NULL)
 3300                 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
 3301         else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
 3302             MPI2_IOCSTATUS_SUCCESS)
 3303                 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
 3304 
 3305         mps_free_command(sc, cm);
 3306         if (sc->mps_ich.ich_arg != NULL) {
 3307                 mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
 3308                 config_intrhook_disestablish(&sc->mps_ich);
 3309                 sc->mps_ich.ich_arg = NULL;
 3310         }
 3311 
 3312         /*
 3313          * Get WarpDrive info after discovery is complete but before the scan
 3314          * starts.  At this point, all devices are ready to be exposed to the
 3315          * OS.  If devices should be hidden instead, take them out of the
 3316          * 'targets' array before the scan.  The devinfo for a disk will have
 3317          * some info and a volume's will be 0.  Use that to remove disks.
 3318          */
 3319         mps_wd_config_pages(sc);
 3320         if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
 3321           && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
 3322          || (sc->WD_valid_config && (sc->WD_hide_expose ==
 3323             MPS_WD_HIDE_IF_VOLUME))) {
 3324                 for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
 3325                         target = &sassc->targets[i];
 3326                         if (target->devinfo) {
 3327                                 target->devinfo = 0x0;
 3328                                 target->encl_handle = 0x0;
 3329                                 target->encl_slot = 0x0;
 3330                                 target->handle = 0x0;
 3331                                 target->tid = 0x0;
 3332                                 target->linkrate = 0x0;
 3333                                 target->flags = 0x0;
 3334                         }
 3335                 }
 3336         }
 3337 
 3338         /*
 3339          * Done waiting for port enable to complete.  Decrement the refcount.
 3340          * If refcount is 0, discovery is complete and a rescan of the bus can
 3341          * take place.  Since the simq was explicitly frozen before port
 3342          * enable, it must be explicitly released here to keep the
 3343          * freeze/release count in sync.
 3344          */
 3345         sc->wait_for_port_enable = 0;
 3346         sc->port_enable_complete = 1;
 3347         mpssas_startup_decrement(sassc);
 3348         xpt_release_simq(sassc->sim, 1);
 3349 }
