FreeBSD/Linux Kernel Cross Reference
sys/dev/mpr/mpr_sas.c


    1 /*-
    2  * Copyright (c) 2009 Yahoo! Inc.
    3  * Copyright (c) 2011-2015 LSI Corp.
    4  * Copyright (c) 2013-2016 Avago Technologies
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
   29  *
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD$");
   34 
   35 /* Communications core for Avago Technologies (LSI) MPT3 */
   36 
   37 /* TODO Move headers to mprvar */
   38 #include <sys/types.h>
   39 #include <sys/param.h>
   40 #include <sys/systm.h>
   41 #include <sys/kernel.h>
   42 #include <sys/selinfo.h>
   43 #include <sys/module.h>
   44 #include <sys/bus.h>
   45 #include <sys/conf.h>
   46 #include <sys/bio.h>
   47 #include <sys/malloc.h>
   48 #include <sys/uio.h>
   49 #include <sys/sysctl.h>
   50 #include <sys/endian.h>
   51 #include <sys/queue.h>
   52 #include <sys/kthread.h>
   53 #include <sys/taskqueue.h>
   54 #include <sys/sbuf.h>
   55 
   56 #include <machine/bus.h>
   57 #include <machine/resource.h>
   58 #include <sys/rman.h>
   59 
   60 #include <machine/stdarg.h>
   61 
   62 #include <cam/cam.h>
   63 #include <cam/cam_ccb.h>
   64 #include <cam/cam_debug.h>
   65 #include <cam/cam_sim.h>
   66 #include <cam/cam_xpt_sim.h>
   67 #include <cam/cam_xpt_periph.h>
   68 #include <cam/cam_periph.h>
   69 #include <cam/scsi/scsi_all.h>
   70 #include <cam/scsi/scsi_message.h>
   71 #if __FreeBSD_version >= 900026
   72 #include <cam/scsi/smp_all.h>
   73 #endif
   74 
   75 #include <dev/nvme/nvme.h>
   76 
   77 #include <dev/mpr/mpi/mpi2_type.h>
   78 #include <dev/mpr/mpi/mpi2.h>
   79 #include <dev/mpr/mpi/mpi2_ioc.h>
   80 #include <dev/mpr/mpi/mpi2_sas.h>
   81 #include <dev/mpr/mpi/mpi2_pci.h>
   82 #include <dev/mpr/mpi/mpi2_cnfg.h>
   83 #include <dev/mpr/mpi/mpi2_init.h>
   84 #include <dev/mpr/mpi/mpi2_tool.h>
   85 #include <dev/mpr/mpr_ioctl.h>
   86 #include <dev/mpr/mprvar.h>
   87 #include <dev/mpr/mpr_table.h>
   88 #include <dev/mpr/mpr_sas.h>
   89 
   90 #define MPRSAS_DISCOVERY_TIMEOUT        20
   91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS   10 /* 200 seconds */
   92 
   93 /*
   94  * static array to check SCSI OpCode for EEDP protection bits
   95  */
   96 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
   97 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
   98 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
   99 static uint8_t op_code_prot[256] = {
  100         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  101         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  102         0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
  103         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  104         0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  105         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  106         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  107         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  108         0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
  109         0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  110         0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
  111         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  112         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  113         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  114         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  115         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  116 };
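
/*
 * Editor's sketch (not part of the original source): the table above is
 * indexed by a command's SCSI opcode (the first CDB byte) to obtain the
 * EEDP flags for that I/O.  A minimal, assumed illustration of the lookup:
 *
 *	uint8_t eedp_flags = op_code_prot[csio->cdb_io.cdb_bytes[0]];
 *
 * For example, READ(10) (opcode 0x28) maps to PRO_R and WRITE(10)
 * (opcode 0x2A) maps to PRO_W, while opcodes with no EEDP handling map
 * to zero.
 */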
  117 
  118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
  119 
  120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
  121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
  122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
  123 static void mprsas_poll(struct cam_sim *sim);
  124 static void mprsas_scsiio_timeout(void *data);
  125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
  126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
  127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
  128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
  129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
  130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
  131     struct mpr_command *cm);
  132 static void mprsas_async(void *callback_arg, uint32_t code,
  133     struct cam_path *path, void *arg);
  134 #if (__FreeBSD_version < 901503) || \
  135     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
  136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
  137     struct ccb_getdev *cgd);
  138 static void mprsas_read_cap_done(struct cam_periph *periph,
  139     union ccb *done_ccb);
  140 #endif
  141 static int mprsas_send_portenable(struct mpr_softc *sc);
  142 static void mprsas_portenable_complete(struct mpr_softc *sc,
  143     struct mpr_command *cm);
  144 
  145 #if __FreeBSD_version >= 900026
  146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
  147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
  148     uint64_t sasaddr);
  149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
   150 #endif /* __FreeBSD_version >= 900026 */
  151 
  152 struct mprsas_target *
  153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
  154     uint16_t handle)
  155 {
  156         struct mprsas_target *target;
  157         int i;
  158 
  159         for (i = start; i < sassc->maxtargets; i++) {
  160                 target = &sassc->targets[i];
  161                 if (target->handle == handle)
  162                         return (target);
  163         }
  164 
  165         return (NULL);
  166 }
  167 
   168 /* We need to freeze the simq during attach and diag reset to avoid failing
   169  * commands before device handles have been found by discovery.  Since
   170  * discovery involves reading config pages and possibly sending commands,
   171  * discovery actions may continue even after we receive the end-of-discovery
   172  * event, so refcount discovery actions instead of assuming we can unfreeze
   173  * the simq when we get the event.
   174  */
  175 void
  176 mprsas_startup_increment(struct mprsas_softc *sassc)
  177 {
  178         MPR_FUNCTRACE(sassc->sc);
  179 
  180         if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
  181                 if (sassc->startup_refcount++ == 0) {
  182                         /* just starting, freeze the simq */
  183                         mpr_dprint(sassc->sc, MPR_INIT,
  184                             "%s freezing simq\n", __func__);
  185 #if (__FreeBSD_version >= 1000039) || \
  186     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
  187                         xpt_hold_boot();
  188 #endif
  189                         xpt_freeze_simq(sassc->sim, 1);
  190                 }
  191                 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
  192                     sassc->startup_refcount);
  193         }
  194 }
  195 
  196 void
  197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
  198 {
  199         if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
  200                 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
  201                 xpt_release_simq(sassc->sim, 1);
  202                 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
  203         }
  204 }
  205 
  206 void
  207 mprsas_startup_decrement(struct mprsas_softc *sassc)
  208 {
  209         MPR_FUNCTRACE(sassc->sc);
  210 
  211         if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
  212                 if (--sassc->startup_refcount == 0) {
  213                         /* finished all discovery-related actions, release
  214                          * the simq and rescan for the latest topology.
  215                          */
  216                         mpr_dprint(sassc->sc, MPR_INIT,
  217                             "%s releasing simq\n", __func__);
  218                         sassc->flags &= ~MPRSAS_IN_STARTUP;
  219                         xpt_release_simq(sassc->sim, 1);
  220 #if (__FreeBSD_version >= 1000039) || \
  221     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
  222                         xpt_release_boot();
  223 #else
  224                         mprsas_rescan_target(sassc->sc, NULL);
  225 #endif
  226                 }
  227                 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
  228                     sassc->startup_refcount);
  229         }
  230 }
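
/*
 * Editor's sketch (not part of the original source): per the comment above
 * mprsas_startup_increment(), the two helpers are intended to bracket each
 * discovery-related action as a matched pair, e.g.
 *
 *	mprsas_startup_increment(sassc);
 *	... read a config page or issue a discovery-related command ...
 *	mprsas_startup_decrement(sassc);
 *
 * so the simq (and the boot hold, where supported) is released only when
 * the last in-flight discovery action completes, not when the
 * end-of-discovery event arrives.
 */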
  231 
  232 /* The firmware requires us to stop sending commands when we're doing task
  233  * management, so refcount the TMs and keep the simq frozen when any are in
  234  * use.
  235  */
  236 struct mpr_command *
  237 mprsas_alloc_tm(struct mpr_softc *sc)
  238 {
  239         struct mpr_command *tm;
  240 
  241         MPR_FUNCTRACE(sc);
  242         tm = mpr_alloc_high_priority_command(sc);
  243         return tm;
  244 }
  245 
  246 void
  247 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
  248 {
  249         int target_id = 0xFFFFFFFF;
  250 
  251         MPR_FUNCTRACE(sc);
  252         if (tm == NULL)
  253                 return;
  254 
   255         /*
   256          * For TMs, the devq is frozen for the device.  Unfreeze it here and
   257          * free the resources used for freezing the devq.  The INRESET flag
   258          * must also be cleared or SCSI I/O will not work.
   259          */
  260         if (tm->cm_targ != NULL) {
  261                 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
  262                 target_id = tm->cm_targ->tid;
  263         }
  264         if (tm->cm_ccb) {
  265                 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
  266                     target_id);
  267                 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
  268                 xpt_free_path(tm->cm_ccb->ccb_h.path);
  269                 xpt_free_ccb(tm->cm_ccb);
  270         }
  271 
  272         mpr_free_high_priority_command(sc, tm);
  273 }
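
/*
 * Editor's sketch (not part of the original source): the usual life cycle
 * of a task management command built with the helpers above, assuming the
 * completion handler is responsible for releasing it:
 *
 *	tm = mprsas_alloc_tm(sc);
 *	if (tm == NULL)
 *		return;		-- no high-priority frame available
 *	... fill in the MPI2_SCSI_TASK_MANAGE_REQUEST at tm->cm_req ...
 *	tm->cm_complete = my_tm_complete;	-- hypothetical handler name
 *	mpr_map_command(sc, tm);
 *	-- my_tm_complete() finishes by calling mprsas_free_tm(sc, tm)
 */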
  274 
  275 void
  276 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
  277 {
  278         struct mprsas_softc *sassc = sc->sassc;
  279         path_id_t pathid;
  280         target_id_t targetid;
  281         union ccb *ccb;
  282 
  283         MPR_FUNCTRACE(sc);
  284         pathid = cam_sim_path(sassc->sim);
  285         if (targ == NULL)
  286                 targetid = CAM_TARGET_WILDCARD;
  287         else
  288                 targetid = targ - sassc->targets;
  289 
  290         /*
  291          * Allocate a CCB and schedule a rescan.
  292          */
  293         ccb = xpt_alloc_ccb_nowait();
  294         if (ccb == NULL) {
  295                 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
  296                 return;
  297         }
  298 
  299         if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
  300             CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
  301                 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
  302                 xpt_free_ccb(ccb);
  303                 return;
  304         }
  305 
  306         if (targetid == CAM_TARGET_WILDCARD)
  307                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
  308         else
  309                 ccb->ccb_h.func_code = XPT_SCAN_TGT;
  310 
  311         mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
  312         xpt_rescan(ccb);
  313 }
  314 
  315 static void
  316 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
  317 {
  318         struct sbuf sb;
  319         va_list ap;
  320         char str[224];
  321         char path_str[64];
  322 
  323         if (cm == NULL)
  324                 return;
  325 
  326         /* No need to be in here if debugging isn't enabled */
  327         if ((cm->cm_sc->mpr_debug & level) == 0)
  328                 return;
  329 
  330         sbuf_new(&sb, str, sizeof(str), 0);
  331 
  332         va_start(ap, fmt);
  333 
  334         if (cm->cm_ccb != NULL) {
  335                 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
  336                     sizeof(path_str));
  337                 sbuf_cat(&sb, path_str);
  338                 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
  339                         scsi_command_string(&cm->cm_ccb->csio, &sb);
  340                         sbuf_printf(&sb, "length %d ",
  341                             cm->cm_ccb->csio.dxfer_len);
  342                 }
  343         } else {
  344                 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
  345                     cam_sim_name(cm->cm_sc->sassc->sim),
  346                     cam_sim_unit(cm->cm_sc->sassc->sim),
  347                     cam_sim_bus(cm->cm_sc->sassc->sim),
  348                     cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
  349                     cm->cm_lun);
  350         }
  351 
  352         sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
  353         sbuf_vprintf(&sb, fmt, ap);
  354         sbuf_finish(&sb);
  355         mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
  356 
  357         va_end(ap);
  358 }
  359 
  360 static void
  361 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
  362 {
  363         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
  364         struct mprsas_target *targ;
  365         uint16_t handle;
  366 
  367         MPR_FUNCTRACE(sc);
  368 
  369         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
  370         handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
  371         targ = tm->cm_targ;
  372 
  373         if (reply == NULL) {
  374                 /* XXX retry the remove after the diag reset completes? */
  375                 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
  376                     "0x%04x\n", __func__, handle);
  377                 mprsas_free_tm(sc, tm);
  378                 return;
  379         }
  380 
  381         if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
  382             MPI2_IOCSTATUS_SUCCESS) {
  383                 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
  384                     "device 0x%x\n", le16toh(reply->IOCStatus), handle);
  385         }
  386 
  387         mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
  388             le32toh(reply->TerminationCount));
  389         mpr_free_reply(sc, tm->cm_reply_data);
  390         tm->cm_reply = NULL;    /* Ensures the reply won't get re-freed */
  391 
  392         mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
  393             targ->tid, handle);
  394         
  395         /*
  396          * Don't clear target if remove fails because things will get confusing.
  397          * Leave the devname and sasaddr intact so that we know to avoid reusing
  398          * this target id if possible, and so we can assign the same target id
  399          * to this device if it comes back in the future.
  400          */
  401         if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
  402             MPI2_IOCSTATUS_SUCCESS) {
  403                 targ = tm->cm_targ;
  404                 targ->handle = 0x0;
  405                 targ->encl_handle = 0x0;
  406                 targ->encl_level_valid = 0x0;
  407                 targ->encl_level = 0x0;
  408                 targ->connector_name[0] = ' ';
  409                 targ->connector_name[1] = ' ';
  410                 targ->connector_name[2] = ' ';
  411                 targ->connector_name[3] = ' ';
  412                 targ->encl_slot = 0x0;
  413                 targ->exp_dev_handle = 0x0;
  414                 targ->phy_num = 0x0;
  415                 targ->linkrate = 0x0;
  416                 targ->devinfo = 0x0;
  417                 targ->flags = 0x0;
  418                 targ->scsi_req_desc_type = 0;
  419         }
  420 
  421         mprsas_free_tm(sc, tm);
  422 }
  423 
  424 
   425 /*
   426  * There is no need to issue MPI2_SAS_OP_REMOVE_DEVICE for volume removal.
   427  * Otherwise, volume deletion is handled the same way as bare-drive removal.
   428  */
  429 void
  430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
  431 {
  432         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
  433         struct mpr_softc *sc;
  434         struct mpr_command *cm;
  435         struct mprsas_target *targ = NULL;
  436 
  437         MPR_FUNCTRACE(sassc->sc);
  438         sc = sassc->sc;
  439 
  440         targ = mprsas_find_target_by_handle(sassc, 0, handle);
  441         if (targ == NULL) {
  442                 /* FIXME: what is the action? */
  443                 /* We don't know about this device? */
  444                 mpr_dprint(sc, MPR_ERROR,
  445                    "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
  446                 return;
  447         }
  448 
  449         targ->flags |= MPRSAS_TARGET_INREMOVAL;
  450 
  451         cm = mprsas_alloc_tm(sc);
  452         if (cm == NULL) {
  453                 mpr_dprint(sc, MPR_ERROR,
  454                     "%s: command alloc failure\n", __func__);
  455                 return;
  456         }
  457 
  458         mprsas_rescan_target(sc, targ);
  459 
  460         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
  461         req->DevHandle = targ->handle;
  462         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
  463         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  464 
  465         /* SAS Hard Link Reset / SATA Link Reset */
  466         req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
  467 
  468         cm->cm_targ = targ;
  469         cm->cm_data = NULL;
  470         cm->cm_desc.HighPriority.RequestFlags =
  471             MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
  472         cm->cm_complete = mprsas_remove_volume;
  473         cm->cm_complete_data = (void *)(uintptr_t)handle;
  474 
  475         mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
  476             __func__, targ->tid);
  477         mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
  478 
  479         mpr_map_command(sc, cm);
  480 }
  481 
  482 /*
  483  * The firmware performs debounce on the link to avoid transient link errors
  484  * and false removals.  When it does decide that link has been lost and a
  485  * device needs to go away, it expects that the host will perform a target reset
  486  * and then an op remove.  The reset has the side-effect of aborting any
  487  * outstanding requests for the device, which is required for the op-remove to
  488  * succeed.  It's not clear if the host should check for the device coming back
  489  * alive after the reset.
  490  */
  491 void
  492 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
  493 {
  494         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
  495         struct mpr_softc *sc;
  496         struct mpr_command *cm;
  497         struct mprsas_target *targ = NULL;
  498 
  499         MPR_FUNCTRACE(sassc->sc);
  500 
  501         sc = sassc->sc;
  502 
  503         targ = mprsas_find_target_by_handle(sassc, 0, handle);
  504         if (targ == NULL) {
  505                 /* FIXME: what is the action? */
  506                 /* We don't know about this device? */
  507                 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
  508                     __func__, handle);
  509                 return;
  510         }
  511 
  512         targ->flags |= MPRSAS_TARGET_INREMOVAL;
  513 
  514         cm = mprsas_alloc_tm(sc);
  515         if (cm == NULL) {
  516                 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
  517                     __func__);
  518                 return;
  519         }
  520 
  521         mprsas_rescan_target(sc, targ);
  522 
  523         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
  524         memset(req, 0, sizeof(*req));
  525         req->DevHandle = htole16(targ->handle);
  526         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
  527         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  528 
  529         /* SAS Hard Link Reset / SATA Link Reset */
  530         req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
  531 
  532         cm->cm_targ = targ;
  533         cm->cm_data = NULL;
  534         cm->cm_desc.HighPriority.RequestFlags =
  535             MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
  536         cm->cm_complete = mprsas_remove_device;
  537         cm->cm_complete_data = (void *)(uintptr_t)handle;
  538 
  539         mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
  540             __func__, targ->tid);
  541         mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
  542 
  543         mpr_map_command(sc, cm);
  544 }
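
/*
 * Editor's note (not part of the original source): the reset-then-remove
 * sequence described above plays out across three functions in this file:
 *
 *	mprsas_prepare_remove()    sends a TARGET_RESET task management
 *	                           request for the departing device handle;
 *	mprsas_remove_device()     runs on TM completion and reuses the same
 *	                           command frame for a SAS IO unit control
 *	                           request with MPI2_SAS_OP_REMOVE_DEVICE;
 *	mprsas_remove_complete()   clears the target's handle, enclosure and
 *	                           LUN state if the op-remove succeeds.
 */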
  545 
  546 static void
  547 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
  548 {
  549         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
  550         MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
  551         struct mprsas_target *targ;
  552         struct mpr_command *next_cm;
  553         uint16_t handle;
  554 
  555         MPR_FUNCTRACE(sc);
  556 
  557         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
  558         handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
  559         targ = tm->cm_targ;
  560 
  561         /*
  562          * Currently there should be no way we can hit this case.  It only
  563          * happens when we have a failure to allocate chain frames, and
  564          * task management commands don't have S/G lists.
  565          */
  566         if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
  567                 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
  568                     "handle %#04x! This should not happen!\n", __func__,
  569                     tm->cm_flags, handle);
  570         }
  571 
  572         if (reply == NULL) {
  573                 /* XXX retry the remove after the diag reset completes? */
  574                 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
  575                     "0x%04x\n", __func__, handle);
  576                 mprsas_free_tm(sc, tm);
  577                 return;
  578         }
  579 
  580         if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
  581             MPI2_IOCSTATUS_SUCCESS) {
  582                 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
  583                     "device 0x%x\n", le16toh(reply->IOCStatus), handle);
  584         }
  585 
  586         mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
  587             le32toh(reply->TerminationCount));
  588         mpr_free_reply(sc, tm->cm_reply_data);
  589         tm->cm_reply = NULL;    /* Ensures the reply won't get re-freed */
  590 
  591         /* Reuse the existing command */
  592         req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
  593         memset(req, 0, sizeof(*req));
  594         req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
  595         req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
  596         req->DevHandle = htole16(handle);
  597         tm->cm_data = NULL;
  598         tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
  599         tm->cm_complete = mprsas_remove_complete;
  600         tm->cm_complete_data = (void *)(uintptr_t)handle;
  601 
  602         mpr_map_command(sc, tm);
  603 
  604         mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
  605             targ->tid, handle);
  606         if (targ->encl_level_valid) {
  607                 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
  608                     "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
  609                     targ->connector_name);
  610         }
  611         TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
  612                 union ccb *ccb;
  613 
  614                 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
  615                 ccb = tm->cm_complete_data;
  616                 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
  617                 mprsas_scsiio_complete(sc, tm);
  618         }
  619 }
  620 
  621 static void
  622 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
  623 {
  624         MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
  625         uint16_t handle;
  626         struct mprsas_target *targ;
  627         struct mprsas_lun *lun;
  628 
  629         MPR_FUNCTRACE(sc);
  630 
  631         reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
  632         handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
  633 
  634         /*
  635          * Currently there should be no way we can hit this case.  It only
  636          * happens when we have a failure to allocate chain frames, and
  637          * task management commands don't have S/G lists.
  638          */
  639         if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
  640                 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
  641                     "handle %#04x! This should not happen!\n", __func__,
  642                     tm->cm_flags, handle);
  643                 mprsas_free_tm(sc, tm);
  644                 return;
  645         }
  646 
  647         if (reply == NULL) {
  648                 /* most likely a chip reset */
  649                 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
  650                     "0x%04x\n", __func__, handle);
  651                 mprsas_free_tm(sc, tm);
  652                 return;
  653         }
  654 
  655         mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
  656             __func__, handle, le16toh(reply->IOCStatus));
  657 
  658         /*
  659          * Don't clear target if remove fails because things will get confusing.
  660          * Leave the devname and sasaddr intact so that we know to avoid reusing
  661          * this target id if possible, and so we can assign the same target id
  662          * to this device if it comes back in the future.
  663          */
  664         if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
  665             MPI2_IOCSTATUS_SUCCESS) {
  666                 targ = tm->cm_targ;
  667                 targ->handle = 0x0;
  668                 targ->encl_handle = 0x0;
  669                 targ->encl_level_valid = 0x0;
  670                 targ->encl_level = 0x0;
  671                 targ->connector_name[0] = ' ';
  672                 targ->connector_name[1] = ' ';
  673                 targ->connector_name[2] = ' ';
  674                 targ->connector_name[3] = ' ';
  675                 targ->encl_slot = 0x0;
  676                 targ->exp_dev_handle = 0x0;
  677                 targ->phy_num = 0x0;
  678                 targ->linkrate = 0x0;
  679                 targ->devinfo = 0x0;
  680                 targ->flags = 0x0;
  681                 targ->scsi_req_desc_type = 0;
  682                 
  683                 while (!SLIST_EMPTY(&targ->luns)) {
  684                         lun = SLIST_FIRST(&targ->luns);
  685                         SLIST_REMOVE_HEAD(&targ->luns, lun_link);
  686                         free(lun, M_MPR);
  687                 }
  688         }
  689 
  690         mprsas_free_tm(sc, tm);
  691 }
  692 
  693 static int
  694 mprsas_register_events(struct mpr_softc *sc)
  695 {
  696         uint8_t events[16];
  697 
  698         bzero(events, 16);
  699         setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
  700         setbit(events, MPI2_EVENT_SAS_DISCOVERY);
  701         setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
  702         setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
  703         setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
  704         setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
  705         setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
  706         setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
  707         setbit(events, MPI2_EVENT_IR_VOLUME);
  708         setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
  709         setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
  710         setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
  711         setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
  712         if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
  713                 setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
  714                 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
  715                         setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
  716                         setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
  717                         setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
  718                 }
  719         }
  720 
  721         mpr_register_events(sc, events, mprsas_evt_handler, NULL,
  722             &sc->sassc->mprsas_eh);
  723 
  724         return (0);
  725 }
  726 
  727 int
  728 mpr_attach_sas(struct mpr_softc *sc)
  729 {
  730         struct mprsas_softc *sassc;
  731         cam_status status;
  732         int unit, error = 0, reqs;
  733 
  734         MPR_FUNCTRACE(sc);
  735 
  736         sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
  737         if (!sassc) {
  738                 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
  739                     __func__, __LINE__);
  740                 return (ENOMEM);
  741         }
  742 
  743         /*
  744          * XXX MaxTargets could change during a reinit.  Since we don't
  745          * resize the targets[] array during such an event, cache the value
  746          * of MaxTargets here so that we don't get into trouble later.  This
  747          * should move into the reinit logic.
  748          */
  749         sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
  750         sassc->targets = malloc(sizeof(struct mprsas_target) *
  751             sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
  752         if (!sassc->targets) {
  753                 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
  754                     __func__, __LINE__);
  755                 free(sassc, M_MPR);
  756                 return (ENOMEM);
  757         }
  758         sc->sassc = sassc;
  759         sassc->sc = sc;
  760 
  761         reqs = sc->num_reqs - sc->num_prireqs - 1;
  762         if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
  763                 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
  764                 error = ENOMEM;
  765                 goto out;
  766         }
  767 
  768         unit = device_get_unit(sc->mpr_dev);
  769         sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
  770             unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
  771         if (sassc->sim == NULL) {
  772                 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
  773                 error = EINVAL;
  774                 goto out;
  775         }
  776 
  777         TAILQ_INIT(&sassc->ev_queue);
  778 
  779         /* Initialize taskqueue for Event Handling */
  780         TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
  781         sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
  782             taskqueue_thread_enqueue, &sassc->ev_tq);
  783         taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq", 
  784             device_get_nameunit(sc->mpr_dev));
  785 
  786         mpr_lock(sc);
  787 
  788         /*
  789          * XXX There should be a bus for every port on the adapter, but since
  790          * we're just going to fake the topology for now, we'll pretend that
  791          * everything is just a target on a single bus.
  792          */
  793         if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
  794                 mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
  795                     error);
  796                 mpr_unlock(sc);
  797                 goto out;
  798         }
  799 
  800         /*
  801          * Assume that discovery events will start right away.
  802          *
  803          * Hold off boot until discovery is complete.
  804          */
  805         sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
  806         sc->sassc->startup_refcount = 0;
  807         mprsas_startup_increment(sassc);
  808 
  809         callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
  810 
  811         /*
  812          * Register for async events so we can determine the EEDP
  813          * capabilities of devices.
  814          */
  815         status = xpt_create_path(&sassc->path, /*periph*/NULL,
  816             cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
  817             CAM_LUN_WILDCARD);
  818         if (status != CAM_REQ_CMP) {
  819                 mpr_printf(sc, "Error %#x creating sim path\n", status);
  820                 sassc->path = NULL;
  821         } else {
  822                 int event;
  823 
  824 #if (__FreeBSD_version >= 1000006) || \
  825     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
  826                 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
  827 #else
  828                 event = AC_FOUND_DEVICE;
  829 #endif
  830 
  831                 /*
   832                  * Prior to the CAM locking improvements, we could not call
   833                  * xpt_register_async() with a particular path specified.
  834                  *
  835                  * If a path isn't specified, xpt_register_async() will
  836                  * generate a wildcard path and acquire the XPT lock while
  837                  * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
  838                  * It will then drop the XPT lock once that is done.
  839                  * 
  840                  * If a path is specified for xpt_register_async(), it will
  841                  * not acquire and drop the XPT lock around the call to
  842                  * xpt_action().  xpt_action() asserts that the caller
  843                  * holds the SIM lock, so the SIM lock has to be held when
  844                  * calling xpt_register_async() when the path is specified.
  845                  * 
  846                  * But xpt_register_async calls xpt_for_all_devices(),
  847                  * which calls xptbustraverse(), which will acquire each
  848                  * SIM lock.  When it traverses our particular bus, it will
  849                  * necessarily acquire the SIM lock, which will lead to a
  850                  * recursive lock acquisition.
  851                  * 
  852                  * The CAM locking changes fix this problem by acquiring
  853                  * the XPT topology lock around bus traversal in
  854                  * xptbustraverse(), so the caller can hold the SIM lock
  855                  * and it does not cause a recursive lock acquisition.
  856                  *
  857                  * These __FreeBSD_version values are approximate, especially
  858                  * for stable/10, which is two months later than the actual
  859                  * change.
  860                  */
  861 
  862 #if (__FreeBSD_version < 1000703) || \
  863     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
  864                 mpr_unlock(sc);
  865                 status = xpt_register_async(event, mprsas_async, sc,
  866                                             NULL);
  867                 mpr_lock(sc);
  868 #else
  869                 status = xpt_register_async(event, mprsas_async, sc,
  870                                             sassc->path);
  871 #endif
  872 
  873                 if (status != CAM_REQ_CMP) {
  874                         mpr_dprint(sc, MPR_ERROR,
  875                             "Error %#x registering async handler for "
  876                             "AC_ADVINFO_CHANGED events\n", status);
  877                         xpt_free_path(sassc->path);
  878                         sassc->path = NULL;
  879                 }
  880         }
  881         if (status != CAM_REQ_CMP) {
  882                 /*
  883                  * EEDP use is the exception, not the rule.
  884                  * Warn the user, but do not fail to attach.
  885                  */
  886                 mpr_printf(sc, "EEDP capabilities disabled.\n");
  887         }
  888 
  889         mpr_unlock(sc);
  890 
  891         mprsas_register_events(sc);
  892 out:
  893         if (error)
  894                 mpr_detach_sas(sc);
  895         return (error);
  896 }
  897 
  898 int
  899 mpr_detach_sas(struct mpr_softc *sc)
  900 {
  901         struct mprsas_softc *sassc;
  902         struct mprsas_lun *lun, *lun_tmp;
  903         struct mprsas_target *targ;
  904         int i;
  905 
  906         MPR_FUNCTRACE(sc);
  907 
  908         if (sc->sassc == NULL)
  909                 return (0);
  910 
  911         sassc = sc->sassc;
  912         mpr_deregister_events(sc, sassc->mprsas_eh);
  913 
  914         /*
  915          * Drain and free the event handling taskqueue with the lock
  916          * unheld so that any parallel processing tasks drain properly
  917          * without deadlocking.
  918          */
  919         if (sassc->ev_tq != NULL)
  920                 taskqueue_free(sassc->ev_tq);
  921 
  922         /* Make sure CAM doesn't wedge if we had to bail out early. */
  923         mpr_lock(sc);
  924 
  925         /* Deregister our async handler */
  926         if (sassc->path != NULL) {
  927                 xpt_register_async(0, mprsas_async, sc, sassc->path);
  928                 xpt_free_path(sassc->path);
  929                 sassc->path = NULL;
  930         }
  931 
  932         if (sassc->flags & MPRSAS_IN_STARTUP)
  933                 xpt_release_simq(sassc->sim, 1);
  934 
  935         if (sassc->sim != NULL) {
  936                 xpt_bus_deregister(cam_sim_path(sassc->sim));
  937                 cam_sim_free(sassc->sim, FALSE);
  938         }
  939 
  940         mpr_unlock(sc);
  941 
  942         if (sassc->devq != NULL)
  943                 cam_simq_free(sassc->devq);
  944 
  945         for (i = 0; i < sassc->maxtargets; i++) {
  946                 targ = &sassc->targets[i];
  947                 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
  948                         free(lun, M_MPR);
  949                 }
  950         }
  951         free(sassc->targets, M_MPR);
  952         free(sassc, M_MPR);
  953         sc->sassc = NULL;
  954 
  955         return (0);
  956 }
  957 
  958 void
  959 mprsas_discovery_end(struct mprsas_softc *sassc)
  960 {
  961         struct mpr_softc *sc = sassc->sc;
  962 
  963         MPR_FUNCTRACE(sc);
  964 
  965         if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
  966                 callout_stop(&sassc->discovery_callout);
  967 
  968         /*
  969          * After discovery has completed, check the mapping table for any
  970          * missing devices and update their missing counts. Only do this once
  971          * whenever the driver is initialized so that missing counts aren't
  972          * updated unnecessarily. Note that just because discovery has
  973          * completed doesn't mean that events have been processed yet. The
  974          * check_devices function is a callout timer that checks if ALL devices
  975          * are missing. If so, it will wait a little longer for events to
  976          * complete and keep resetting itself until some device in the mapping
  977          * table is not missing, meaning that event processing has started.
  978          */
  979         if (sc->track_mapping_events) {
  980                 mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
  981                     "completed. Check for missing devices in the mapping "
  982                     "table.\n");
  983                 callout_reset(&sc->device_check_callout,
  984                     MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
  985                     sc);
  986         }
  987 }
  988 
  989 static void
  990 mprsas_action(struct cam_sim *sim, union ccb *ccb)
  991 {
  992         struct mprsas_softc *sassc;
  993 
  994         sassc = cam_sim_softc(sim);
  995 
  996         MPR_FUNCTRACE(sassc->sc);
  997         mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
  998             ccb->ccb_h.func_code);
  999         mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
 1000 
 1001         switch (ccb->ccb_h.func_code) {
 1002         case XPT_PATH_INQ:
 1003         {
 1004                 struct ccb_pathinq *cpi = &ccb->cpi;
 1005                 struct mpr_softc *sc = sassc->sc;
 1006                 uint8_t sges_per_frame;
 1007 
 1008                 cpi->version_num = 1;
 1009                 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
 1010                 cpi->target_sprt = 0;
 1011 #if (__FreeBSD_version >= 1000039) || \
 1012     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
 1013                 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
 1014 #else
 1015                 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
 1016 #endif
 1017                 cpi->hba_eng_cnt = 0;
 1018                 cpi->max_target = sassc->maxtargets - 1;
 1019                 cpi->max_lun = 255;
 1020 
 1021                 /*
 1022                  * initiator_id is set here to an ID outside the set of valid
 1023                  * target IDs (including volumes).
 1024                  */
 1025                 cpi->initiator_id = sassc->maxtargets;
 1026                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 1027                 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
 1028                 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
 1029                 cpi->unit_number = cam_sim_unit(sim);
 1030                 cpi->bus_id = cam_sim_bus(sim);
 1031                 /*
 1032                  * XXXSLM-I think this needs to change based on config page or
 1033                  * something instead of hardcoded to 150000.
 1034                  */
 1035                 cpi->base_transfer_speed = 150000;
 1036                 cpi->transport = XPORT_SAS;
 1037                 cpi->transport_version = 0;
 1038                 cpi->protocol = PROTO_SCSI;
 1039                 cpi->protocol_version = SCSI_REV_SPC;
 1040 
 1041                 /*
 1042                  * Max IO Size is Page Size * the following:
 1043                  * ((SGEs per frame - 1 for chain element) *
 1044                  * Max Chain Depth) + 1 for no chain needed in last frame
 1045                  *
 1046                  * If user suggests a Max IO size to use, use the smaller of the
 1047                  * user's value and the calculated value as long as the user's
 1048                  * value is larger than 0. The user's value is in pages.
 1049                  */
 1050                 sges_per_frame = (sc->chain_frame_size /
 1051                     sizeof(MPI2_IEEE_SGE_SIMPLE64)) - 1;
 1052                 cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
 1053                 cpi->maxio *= PAGE_SIZE;
 1054                 if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
 1055                     cpi->maxio))
 1056                         cpi->maxio = sc->max_io_pages * PAGE_SIZE;
 1057                 sc->maxio = cpi->maxio;
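                /*
                 * Editor's worked example (illustrative figures only, not
                 * read from hardware): assuming a 128-byte chain frame and
                 * 16-byte MPI2_IEEE_SGE_SIMPLE64 entries, sges_per_frame is
                 * 128 / 16 - 1 = 7; with a hypothetical MaxChainDepth of
                 * 128 that gives (7 * 128) + 1 = 897 SGEs, or about 3.5 MiB
                 * with 4 KiB pages, before any user-supplied max_io_pages
                 * cap is applied.
                 */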
 1058                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1059                 break;
 1060         }
 1061         case XPT_GET_TRAN_SETTINGS:
 1062         {
 1063                 struct ccb_trans_settings       *cts;
 1064                 struct ccb_trans_settings_sas   *sas;
 1065                 struct ccb_trans_settings_scsi  *scsi;
 1066                 struct mprsas_target *targ;
 1067 
 1068                 cts = &ccb->cts;
 1069                 sas = &cts->xport_specific.sas;
 1070                 scsi = &cts->proto_specific.scsi;
 1071 
 1072                 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
 1073                     ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
 1074                     cts->ccb_h.target_id));
 1075                 targ = &sassc->targets[cts->ccb_h.target_id];
 1076                 if (targ->handle == 0x0) {
 1077                         mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 1078                         break;
 1079                 }
 1080 
 1081                 cts->protocol_version = SCSI_REV_SPC2;
 1082                 cts->transport = XPORT_SAS;
 1083                 cts->transport_version = 0;
 1084 
 1085                 sas->valid = CTS_SAS_VALID_SPEED;
 1086                 switch (targ->linkrate) {
 1087                 case 0x08:
 1088                         sas->bitrate = 150000;
 1089                         break;
 1090                 case 0x09:
 1091                         sas->bitrate = 300000;
 1092                         break;
 1093                 case 0x0a:
 1094                         sas->bitrate = 600000;
 1095                         break;
 1096                 case 0x0b:
 1097                         sas->bitrate = 1200000;
 1098                         break;
 1099                 default:
 1100                         sas->valid = 0;
 1101                 }
 1102 
 1103                 cts->protocol = PROTO_SCSI;
 1104                 scsi->valid = CTS_SCSI_VALID_TQ;
 1105                 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
 1106 
 1107                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1108                 break;
 1109         }
 1110         case XPT_CALC_GEOMETRY:
 1111                 cam_calc_geometry(&ccb->ccg, /*extended*/1);
 1112                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1113                 break;
 1114         case XPT_RESET_DEV:
 1115                 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
 1116                     "XPT_RESET_DEV\n");
 1117                 mprsas_action_resetdev(sassc, ccb);
 1118                 return;
 1119         case XPT_RESET_BUS:
 1120         case XPT_ABORT:
 1121         case XPT_TERM_IO:
 1122                 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
 1123                     "for abort or reset\n");
 1124                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1125                 break;
 1126         case XPT_SCSI_IO:
 1127                 mprsas_action_scsiio(sassc, ccb);
 1128                 return;
 1129 #if __FreeBSD_version >= 900026
 1130         case XPT_SMP_IO:
 1131                 mprsas_action_smpio(sassc, ccb);
 1132                 return;
 1133 #endif
 1134         default:
 1135                 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
 1136                 break;
 1137         }
 1138         xpt_done(ccb);
 1139 
 1140 }
 1141 
 1142 static void
 1143 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
 1144     target_id_t target_id, lun_id_t lun_id)
 1145 {
 1146         path_id_t path_id = cam_sim_path(sc->sassc->sim);
 1147         struct cam_path *path;
 1148 
 1149         mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
 1150             ac_code, target_id, (uintmax_t)lun_id);
 1151 
 1152         if (xpt_create_path(&path, NULL, 
 1153                 path_id, target_id, lun_id) != CAM_REQ_CMP) {
 1154                 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
 1155                     "notification\n");
 1156                 return;
 1157         }
 1158 
 1159         xpt_async(ac_code, path, NULL);
 1160         xpt_free_path(path);
 1161 }
 1162 
 1163 static void 
 1164 mprsas_complete_all_commands(struct mpr_softc *sc)
 1165 {
 1166         struct mpr_command *cm;
 1167         int i;
 1168         int completed;
 1169 
 1170         MPR_FUNCTRACE(sc);
 1171         mtx_assert(&sc->mpr_mtx, MA_OWNED);
 1172 
 1173         /* complete all commands with a NULL reply */
 1174         for (i = 1; i < sc->num_reqs; i++) {
 1175                 cm = &sc->commands[i];
 1176                 cm->cm_reply = NULL;
 1177                 completed = 0;
 1178 
 1179                 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
 1180                         cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
 1181 
 1182                 if (cm->cm_complete != NULL) {
 1183                         mprsas_log_command(cm, MPR_RECOVERY,
 1184                             "completing cm %p state %x ccb %p for diag reset\n",
 1185                             cm, cm->cm_state, cm->cm_ccb);
 1186                         cm->cm_complete(sc, cm);
 1187                         completed = 1;
 1188                 }
 1189 
 1190                 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
 1191                         mprsas_log_command(cm, MPR_RECOVERY,
 1192                             "waking up cm %p state %x ccb %p for diag reset\n", 
 1193                             cm, cm->cm_state, cm->cm_ccb);
 1194                         wakeup(cm);
 1195                         completed = 1;
 1196                 }
 1197 
 1198                 if (cm->cm_sc->io_cmds_active != 0)
 1199                         cm->cm_sc->io_cmds_active--;
 1200                 
 1201                 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
 1202                         /* this should never happen, but if it does, log */
 1203                         mprsas_log_command(cm, MPR_RECOVERY,
 1204                             "cm %p state %x flags 0x%x ccb %p during diag "
 1205                             "reset\n", cm, cm->cm_state, cm->cm_flags,
 1206                             cm->cm_ccb);
 1207                 }
 1208         }
 1209 }
 1210 
 1211 void
 1212 mprsas_handle_reinit(struct mpr_softc *sc)
 1213 {
 1214         int i;
 1215 
 1216         /* Go back into startup mode and freeze the simq, so that CAM
 1217          * doesn't send any commands until after we've rediscovered all
 1218          * targets and found the proper device handles for them.
 1219          *
 1220          * After the reset, portenable will trigger discovery, and after all
 1221          * discovery-related activities have finished, the simq will be
 1222          * released.
 1223          */
 1224         mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
 1225         sc->sassc->flags |= MPRSAS_IN_STARTUP;
 1226         sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
 1227         mprsas_startup_increment(sc->sassc);
 1228 
 1229         /* notify CAM of a bus reset */
 1230         mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, 
 1231             CAM_LUN_WILDCARD);
 1232 
 1233         /* complete and cleanup after all outstanding commands */
 1234         mprsas_complete_all_commands(sc);
 1235 
 1236         mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
 1237             __func__, sc->sassc->startup_refcount);
 1238 
 1239         /* zero all the target handles, since they may change after the
 1240          * reset, and we have to rediscover all the targets and use the new
 1241          * handles.  
 1242          */
 1243         for (i = 0; i < sc->sassc->maxtargets; i++) {
 1244                 if (sc->sassc->targets[i].outstanding != 0)
 1245                         mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n", 
 1246                             i, sc->sassc->targets[i].outstanding);
 1247                 sc->sassc->targets[i].handle = 0x0;
 1248                 sc->sassc->targets[i].exp_dev_handle = 0x0;
 1249                 sc->sassc->targets[i].outstanding = 0;
 1250                 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
 1251         }
 1252 }
 1253 static void
 1254 mprsas_tm_timeout(void *data)
 1255 {
 1256         struct mpr_command *tm = data;
 1257         struct mpr_softc *sc = tm->cm_sc;
 1258 
 1259         mtx_assert(&sc->mpr_mtx, MA_OWNED);
 1260 
 1261         mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
 1262             "out\n", tm);
 1263         mpr_reinit(sc);
 1264 }
 1265 
 1266 static void
 1267 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
 1268 {
 1269         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
 1270         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1271         unsigned int cm_count = 0;
 1272         struct mpr_command *cm;
 1273         struct mprsas_target *targ;
 1274 
 1275         callout_stop(&tm->cm_callout);
 1276 
 1277         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1278         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 1279         targ = tm->cm_targ;
 1280 
 1281         /*
 1282          * Currently there should be no way we can hit this case.  It only
 1283          * happens when we have a failure to allocate chain frames, and
 1284          * task management commands don't have S/G lists.
 1285          */
 1286         if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
 1287                 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
 1288                     "This should not happen!\n", __func__, tm->cm_flags);
 1289                 mprsas_free_tm(sc, tm);
 1290                 return;
 1291         }
 1292 
 1293         if (reply == NULL) {
 1294                 mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
 1295                     "%p\n", tm);
 1296                 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
 1297                         /* this completion was due to a reset, just cleanup */
 1298                         targ->tm = NULL;
 1299                         mprsas_free_tm(sc, tm);
 1300                 }
 1301                 else {
 1302                         /* we should have gotten a reply. */
 1303                         mpr_reinit(sc);
 1304                 }
 1305                 return;
 1306         }
 1307 
 1308         mprsas_log_command(tm, MPR_RECOVERY,
 1309             "logical unit reset status 0x%x code 0x%x count %u\n",
 1310             le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
 1311             le32toh(reply->TerminationCount));
 1312                 
 1313         /* See if there are any outstanding commands for this LUN.
 1314          * This could be made more efficient by using a per-LU data
 1315          * structure of some sort.
 1316          */
 1317         TAILQ_FOREACH(cm, &targ->commands, cm_link) {
 1318                 if (cm->cm_lun == tm->cm_lun)
 1319                         cm_count++;
 1320         }
 1321 
 1322         if (cm_count == 0) {
 1323                 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
 1324                     "logical unit %u finished recovery after reset\n",
 1325                     tm->cm_lun);
 1326 
 1327                 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 
 1328                     tm->cm_lun);
 1329 
 1330                 /* we've finished recovery for this logical unit.  check and
 1331                  * see if some other logical unit has a timedout command
 1332                  * that needs to be processed.
 1333                  */
 1334                 cm = TAILQ_FIRST(&targ->timedout_commands);
 1335                 if (cm) {
 1336                         mprsas_send_abort(sc, tm, cm);
 1337                 }
 1338                 else {
 1339                         targ->tm = NULL;
 1340                         mprsas_free_tm(sc, tm);
 1341                 }
 1342         }
 1343         else {
 1344                 /* if we still have commands for this LUN, the reset
 1345                  * effectively failed, regardless of the status reported.
 1346                  * Escalate to a target reset.
 1347                  */
 1348                 mprsas_log_command(tm, MPR_RECOVERY,
 1349                     "logical unit reset complete for tm %p, but still have %u "
 1350                     "command(s)\n", tm, cm_count);
 1351                 mprsas_send_reset(sc, tm,
 1352                     MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
 1353         }
 1354 }
 1355 
 1356 static void
 1357 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
 1358 {
 1359         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
 1360         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1361         struct mprsas_target *targ;
 1362 
 1363         callout_stop(&tm->cm_callout);
 1364 
 1365         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1366         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 1367         targ = tm->cm_targ;
 1368 
 1369         /*
 1370          * Currently there should be no way we can hit this case.  It only
 1371          * happens when we have a failure to allocate chain frames, and
 1372          * task management commands don't have S/G lists.
 1373          */
 1374         if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
 1375                 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
 1376                     "reset! This should not happen!\n", __func__, tm->cm_flags);
 1377                 mprsas_free_tm(sc, tm);
 1378                 return;
 1379         }
 1380 
 1381         if (reply == NULL) {
 1382                 mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
 1383                     "%p\n", tm);
 1384                 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
 1385                         /* this completion was due to a reset, just cleanup */
 1386                         targ->tm = NULL;
 1387                         mprsas_free_tm(sc, tm);
 1388                 }
 1389                 else {
 1390                         /* we should have gotten a reply. */
 1391                         mpr_reinit(sc);
 1392                 }
 1393                 return;
 1394         }
 1395 
 1396         mprsas_log_command(tm, MPR_RECOVERY,
 1397             "target reset status 0x%x code 0x%x count %u\n",
 1398             le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
 1399             le32toh(reply->TerminationCount));
 1400 
 1401         if (targ->outstanding == 0) {
 1402                 /* we've finished recovery for this target and all
 1403                  * of its logical units.
 1404                  */
 1405                 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
 1406                     "recovery finished after target reset\n");
 1407 
 1408                 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
 1409                     CAM_LUN_WILDCARD);
 1410 
 1411                 targ->tm = NULL;
 1412                 mprsas_free_tm(sc, tm);
 1413         }
 1414         else {
 1415                 /* after a target reset, if this target still has
 1416                  * outstanding commands, the reset effectively failed,
 1417                  * regardless of the status reported.  escalate.
 1418                  */
 1419                 mprsas_log_command(tm, MPR_RECOVERY,
 1420                     "target reset complete for tm %p, but still have %u "
 1421                     "command(s)\n", tm, targ->outstanding);
 1422                 mpr_reinit(sc);
 1423         }
 1424 }
 1425 
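      /*
       * Error recovery in this driver escalates in stages: abort task ->
       * logical unit reset -> target reset -> controller diag reset
       * (mpr_reinit).  The TM timeouts below are in seconds; the callouts
       * are armed with (value * hz).
       */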
 1426 #define MPR_RESET_TIMEOUT 30
 1427 
 1428 int
 1429 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
 1430 {
 1431         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1432         struct mprsas_target *target;
 1433         int err;
 1434 
 1435         target = tm->cm_targ;
 1436         if (target->handle == 0) {
 1437                 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
 1438                     "%d\n", __func__, target->tid);
 1439                 return -1;
 1440         }
 1441 
 1442         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1443         req->DevHandle = htole16(target->handle);
 1444         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 1445         req->TaskType = type;
 1446 
 1447         if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
 1448                 /* XXX Need to handle invalid LUNs */
 1449                 MPR_SET_LUN(req->LUN, tm->cm_lun);
 1450                 tm->cm_targ->logical_unit_resets++;
 1451                 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
 1452                     "sending logical unit reset\n");
 1453                 tm->cm_complete = mprsas_logical_unit_reset_complete;
 1454                 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
 1455         }
 1456         else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
 1457                 /*
 1458                  * Target reset method =
 1459                  *     SAS Hard Link Reset / SATA Link Reset
 1460                  */
 1461                 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
 1462                 tm->cm_targ->target_resets++;
 1463                 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
 1464                     "sending target reset\n");
 1465                 tm->cm_complete = mprsas_target_reset_complete;
 1466                 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
 1467         }
 1468         else {
 1469                 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
 1470                 return -1;
 1471         }
 1472 
 1473         mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid,
 1474             target->handle);
 1475         if (target->encl_level_valid) {
 1476                 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
 1477                     "connector name (%4s)\n", target->encl_level,
 1478                     target->encl_slot, target->connector_name);
 1479         }
 1480 
 1481         tm->cm_data = NULL;
 1482         tm->cm_desc.HighPriority.RequestFlags =
 1483             MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
 1484         tm->cm_complete_data = (void *)tm;
 1485 
 1486         callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
 1487             mprsas_tm_timeout, tm);
 1488 
 1489         err = mpr_map_command(sc, tm);
 1490         if (err)
 1491                 mprsas_log_command(tm, MPR_RECOVERY,
 1492                     "error %d sending reset type %u\n", err, type);
 1493 
 1494         return err;
 1495 }
 1496 
 1497 
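      /*
       * Completion handler for ABORT TASK.  Depending on the outcome,
       * either recovery for this target is finished (no timed-out commands
       * remain), the next timed-out command is aborted, or, if the aborted
       * command never completed, recovery escalates to a logical unit
       * reset.
       */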
 1498 static void
 1499 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
 1500 {
 1501         struct mpr_command *cm;
 1502         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
 1503         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1504         struct mprsas_target *targ;
 1505 
 1506         callout_stop(&tm->cm_callout);
 1507 
 1508         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1509         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 1510         targ = tm->cm_targ;
 1511 
 1512         /*
 1513          * Currently there should be no way we can hit this case.  It only
 1514          * happens when we have a failure to allocate chain frames, and
 1515          * task management commands don't have S/G lists.
 1516          */
 1517         if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
 1518                 mprsas_log_command(tm, MPR_RECOVERY,
 1519                     "cm_flags = %#x for abort %p TaskMID %u!\n", 
 1520                     tm->cm_flags, tm, le16toh(req->TaskMID));
 1521                 mprsas_free_tm(sc, tm);
 1522                 return;
 1523         }
 1524 
 1525         if (reply == NULL) {
 1526                 mprsas_log_command(tm, MPR_RECOVERY,
 1527                     "NULL abort reply for tm %p TaskMID %u\n", 
 1528                     tm, le16toh(req->TaskMID));
 1529                 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
 1530                         /* this completion was due to a reset, just cleanup */
 1531                         targ->tm = NULL;
 1532                         mprsas_free_tm(sc, tm);
 1533                 }
 1534                 else {
 1535                         /* we should have gotten a reply. */
 1536                         mpr_reinit(sc);
 1537                 }
 1538                 return;
 1539         }
 1540 
 1541         mprsas_log_command(tm, MPR_RECOVERY,
 1542             "abort TaskMID %u status 0x%x code 0x%x count %u\n",
 1543             le16toh(req->TaskMID),
 1544             le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
 1545             le32toh(reply->TerminationCount));
 1546 
 1547         cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
 1548         if (cm == NULL) {
 1549                 /* if there are no more timedout commands, we're done with
 1550                  * error recovery for this target.
 1551                  */
 1552                 mprsas_log_command(tm, MPR_RECOVERY,
 1553                     "finished recovery after aborting TaskMID %u\n",
 1554                     le16toh(req->TaskMID));
 1555 
 1556                 targ->tm = NULL;
 1557                 mprsas_free_tm(sc, tm);
 1558         }
 1559         else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
 1560                 /* abort success, but we have more timedout commands to abort */
 1561                 mprsas_log_command(tm, MPR_RECOVERY,
 1562                     "continuing recovery after aborting TaskMID %u\n",
 1563                     le16toh(req->TaskMID));
 1564                 
 1565                 mprsas_send_abort(sc, tm, cm);
 1566         }
 1567         else {
 1568                 /* we didn't get a command completion, so the abort
 1569                  * failed as far as we're concerned.  escalate.
 1570                  */
 1571                 mprsas_log_command(tm, MPR_RECOVERY,
 1572                     "abort failed for TaskMID %u tm %p\n",
 1573                     le16toh(req->TaskMID), tm);
 1574 
 1575                 mprsas_send_reset(sc, tm, 
 1576                     MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
 1577         }
 1578 }
 1579 
 1580 #define MPR_ABORT_TIMEOUT 5
 1581 
 1582 static int
 1583 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
 1584     struct mpr_command *cm)
 1585 {
 1586         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1587         struct mprsas_target *targ;
 1588         int err;
 1589 
 1590         targ = cm->cm_targ;
 1591         if (targ->handle == 0) {
 1592                 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
 1593                     __func__, cm->cm_ccb->ccb_h.target_id);
 1594                 return -1;
 1595         }
 1596 
 1597         mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
 1598             "Aborting command %p\n", cm);
 1599 
 1600         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1601         req->DevHandle = htole16(targ->handle);
 1602         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 1603         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
 1604 
 1605         /* XXX Need to handle invalid LUNs */
 1606         MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
 1607 
 1608         req->TaskMID = htole16(cm->cm_desc.Default.SMID);
 1609 
 1610         tm->cm_data = NULL;
 1611         tm->cm_desc.HighPriority.RequestFlags =
 1612             MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
 1613         tm->cm_complete = mprsas_abort_complete;
 1614         tm->cm_complete_data = (void *)tm;
 1615         tm->cm_targ = cm->cm_targ;
 1616         tm->cm_lun = cm->cm_lun;
 1617 
 1618         callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
 1619             mprsas_tm_timeout, tm);
 1620 
 1621         targ->aborts++;
 1622 
 1623         mpr_dprint(sc, MPR_INFO, "Sending abort from %s for target ID %d\n",
 1624             __func__, targ->tid);
 1625         mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
 1626 
 1627         err = mpr_map_command(sc, tm);
 1628         if (err)
 1629                 mpr_dprint(sc, MPR_RECOVERY,
 1630                     "error %d sending abort for cm %p SMID %u\n",
 1631                     err, cm, le16toh(req->TaskMID));
 1632         return err;
 1633 }
 1634 
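      /*
       * Per-command SCSI I/O timeout handler.  The timed-out command is
       * queued on its target's timedout_commands list and, unless the
       * target is already in recovery, a TM command is allocated and an
       * abort is sent for it (one TM per target at a time).
       */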
 1635 static void
 1636 mprsas_scsiio_timeout(void *data)
 1637 {
 1638         struct mpr_softc *sc;
 1639         struct mpr_command *cm;
 1640         struct mprsas_target *targ;
 1641 
 1642         cm = (struct mpr_command *)data;
 1643         sc = cm->cm_sc;
 1644 
 1645         MPR_FUNCTRACE(sc);
 1646         mtx_assert(&sc->mpr_mtx, MA_OWNED);
 1647 
 1648         mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);
 1649 
 1650         /*
 1651          * Run the interrupt handler to make sure it's not pending.  This
 1652          * isn't perfect because the command could have already completed
 1653          * and been re-used, though this is unlikely.
 1654          */
 1655         mpr_intr_locked(sc);
 1656         if (cm->cm_state == MPR_CM_STATE_FREE) {
 1657                 mprsas_log_command(cm, MPR_XINFO,
 1658                     "SCSI command %p almost timed out\n", cm);
 1659                 return;
 1660         }
 1661 
 1662         if (cm->cm_ccb == NULL) {
 1663                 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
 1664                 return;
 1665         }
 1666 
 1667         targ = cm->cm_targ;
 1668         targ->timeouts++;
 1669 
 1670         mprsas_log_command(cm, MPR_ERROR, "command timeout %d cm %p target "
 1671             "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm, targ->tid,
 1672             targ->handle);
 1673         if (targ->encl_level_valid) {
 1674                 mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, "
 1675                     "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
 1676                     targ->connector_name);
 1677         }
 1678 
 1679         /* XXX first, check the firmware state, to see if it's still
 1680          * operational.  if not, do a diag reset.
 1681          */
 1682         mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
 1683         cm->cm_state = MPR_CM_STATE_TIMEDOUT;
 1684         TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
 1685 
 1686         if (targ->tm != NULL) {
 1687                 /* target already in recovery, just queue up another
 1688                  * timedout command to be processed later.
 1689                  */
 1690                 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
 1691                     "processing by tm %p\n", cm, targ->tm);
 1692         }
 1693         else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
 1694                 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
 1695                     cm, targ->tm);
 1696 
 1697                 /* start recovery by aborting the first timedout command */
 1698                 mprsas_send_abort(sc, targ->tm, cm);
 1699         }
 1700         else {
 1701                 /* XXX queue this target up for recovery once a TM becomes
 1702                  * available.  The firmware only has a limited number of
 1703                  * HighPriority credits for the high priority requests used
 1704                  * for task management, and we ran out.
 1705                  * 
 1706                  * Isilon: don't worry about this for now, since we have
 1707                  * more credits than disks in an enclosure, and limit
 1708                  * ourselves to one TM per target for recovery.
 1709                  */
 1710                 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p failed to "
 1711                     "allocate a tm\n", cm);
 1712         }
 1713 }
 1714 
 1715 /**
 1716  * mprsas_build_nvme_unmap - Build the native NVMe DSM command equivalent
 1717  *                           to SCSI UNMAP.
 1718  * Return 0 - success; the NVMe Encapsulated request was issued to the drive,
 1719  *        1 - return the command to CAM immediately with success status,
 1720  *        negative value - fall back to the firmware path, i.e. issue the
 1721  *                         SCSI UNMAP to FW without any translation.
 1722  */
 1723 static int
 1724 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
 1725     union ccb *ccb, struct mprsas_target *targ)
 1726 {
 1727         Mpi26NVMeEncapsulatedRequest_t *req = NULL;
 1728         struct ccb_scsiio *csio;
 1729         struct unmap_parm_list *plist;
 1730         struct nvme_dsm_range *nvme_dsm_ranges = NULL;
 1731         struct nvme_command *c;
 1732         int i, res = 0;
 1733         uint16_t ndesc, list_len, data_length;
 1734         struct mpr_prp_page *prp_page_info;
 1735         uint64_t nvme_dsm_ranges_dma_handle;
 1736 
 1737         csio = &ccb->csio;
 1738 #if __FreeBSD_version >= 1100103
 1739         list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
 1740 #else
 1741         if (csio->ccb_h.flags & CAM_CDB_POINTER) {
 1742                 list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
 1743                     ccb->csio.cdb_io.cdb_ptr[8]);
 1744         } else {
 1745                 list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
 1746                     ccb->csio.cdb_io.cdb_bytes[8]);
 1747         }
 1748 #endif
 1749         if (!list_len) {
 1750                 mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
 1751                 return -EINVAL;
 1752         }
 1753 
 1754         plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
 1755         if (!plist) {
 1756                 mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
 1757                     "save UNMAP data\n");
 1758                 return -ENOMEM;
 1759         }
 1760 
 1761         /* Copy SCSI unmap data to a local buffer */
 1762         bcopy(csio->data_ptr, plist, csio->dxfer_len);
 1763 
 1764         /* Return the unmap command to CAM with success status if the
 1765          * number of descriptors is zero.
 1766          */
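              /* Each block descriptor is 16 bytes, hence the shift by 4. */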
 1767         ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
 1768         if (!ndesc) {
 1769                 mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
 1770                     "UNMAP cmd is Zero\n");
 1771                 res = 1;
 1772                 goto out;
 1773         }
 1774 
 1775         data_length = ndesc * sizeof(struct nvme_dsm_range);
 1776         if (data_length > targ->MDTS) {
 1777                 mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
 1778                     "Device's MDTS: %d\n", data_length, targ->MDTS);
 1779                 res = -EINVAL;
 1780                 goto out;
 1781         }
 1782 
 1783         prp_page_info = mpr_alloc_prp_page(sc);
 1784         KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
 1785             "UNMAP command.\n", __func__));
 1786 
 1787         /*
 1788          * Insert the allocated PRP page into the command's PRP page list. This
 1789          * will be freed when the command is freed.
 1790          */
 1791         TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
 1792 
 1793         nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
 1794         nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
 1795 
 1796         bzero(nvme_dsm_ranges, data_length);
 1797 
 1798         /* Convert each descriptor contained in the SCSI UNMAP data to an
 1799          * NVMe DSM-specific Range entry.
 1800          */
 1801         for (i = 0; i < ndesc; i++) {
 1802                 nvme_dsm_ranges[i].length =
 1803                     htole32(be32toh(plist->desc[i].nlb));
 1804                 nvme_dsm_ranges[i].starting_lba =
 1805                     htole64(be64toh(plist->desc[i].slba));
 1806                 nvme_dsm_ranges[i].attributes = 0;
 1807         }
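              /*
               * Note: SCSI UNMAP descriptors are big-endian, while NVMe DSM
               * ranges are little-endian, hence the byte swaps above.
               */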
 1808 
 1809         /* Build MPI2.6's NVMe Encapsulated Request Message */
 1810         req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
 1811         bzero(req, sizeof(*req));
 1812         req->DevHandle = htole16(targ->handle);
 1813         req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
 1814         req->Flags = MPI26_NVME_FLAGS_WRITE;
 1815         req->ErrorResponseBaseAddress.High =
 1816             htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
 1817         req->ErrorResponseBaseAddress.Low =
 1818             htole32(cm->cm_sense_busaddr);
 1819         req->ErrorResponseAllocationLength =
 1820             htole16(sizeof(struct nvme_completion));
 1821         req->EncapsulatedCommandLength =
 1822             htole16(sizeof(struct nvme_command));
 1823         req->DataLength = htole32(data_length);
 1824 
 1825         /* Build NVMe DSM command */
 1826         c = (struct nvme_command *) req->NVMe_Command;
 1827         c->opc = NVME_OPC_DATASET_MANAGEMENT;
 1828         c->nsid = htole32(csio->ccb_h.target_lun + 1);
 1829         c->cdw10 = htole32(ndesc - 1);
 1830         c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
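              /*
               * cdw10 carries the number of ranges as a zero-based count
               * (hence ndesc - 1); cdw11 requests the deallocate attribute;
               * the namespace ID is derived from the LUN (NVMe namespace
               * IDs start at 1).
               */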
 1831 
 1832         cm->cm_length = data_length;
 1833         cm->cm_data = NULL;
 1834 
 1835         cm->cm_complete = mprsas_scsiio_complete;
 1836         cm->cm_complete_data = ccb;
 1837         cm->cm_targ = targ;
 1838         cm->cm_lun = csio->ccb_h.target_lun;
 1839         cm->cm_ccb = ccb;
 1840 
 1841         cm->cm_desc.Default.RequestFlags =
 1842             MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
 1843 
 1844 #if __FreeBSD_version >= 1000029
 1845         callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
 1846             mprsas_scsiio_timeout, cm, 0);
 1847 #else //__FreeBSD_version < 1000029
 1848         callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
 1849             mprsas_scsiio_timeout, cm);
 1850 #endif //__FreeBSD_version >= 1000029
 1851 
 1852         targ->issued++;
 1853         targ->outstanding++;
 1854         TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
 1855         ccb->ccb_h.status |= CAM_SIM_QUEUED;
 1856 
 1857         mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
 1858             __func__, cm, ccb, targ->outstanding);
 1859 
 1860         mpr_build_nvme_prp(sc, cm, req,
 1861             (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
 1862         mpr_map_command(sc, cm);
 1863 
 1864 out:
 1865         free(plist, M_MPR);
 1866         return (res);
 1867 }
 1868 
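      /*
       * Handle an XPT_SCSI_IO CCB: validate the target, translate the CDB
       * into an MPI2 SCSI IO request (or into a native NVMe DSM command
       * for UNMAP sent to an NVMe device), set up EEDP where applicable,
       * and queue the command to the controller.
       */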
 1869 static void
 1870 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
 1871 {
 1872         MPI2_SCSI_IO_REQUEST *req;
 1873         struct ccb_scsiio *csio;
 1874         struct mpr_softc *sc;
 1875         struct mprsas_target *targ;
 1876         struct mprsas_lun *lun;
 1877         struct mpr_command *cm;
 1878         uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
 1879         uint16_t eedp_flags;
 1880         uint32_t mpi_control;
 1881         int rc;
 1882 
 1883         sc = sassc->sc;
 1884         MPR_FUNCTRACE(sc);
 1885         mtx_assert(&sc->mpr_mtx, MA_OWNED);
 1886 
 1887         csio = &ccb->csio;
 1888         KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
 1889             ("Target %d out of bounds in XPT_SCSI_IO\n",
 1890              csio->ccb_h.target_id));
 1891         targ = &sassc->targets[csio->ccb_h.target_id];
 1892         mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
 1893         if (targ->handle == 0x0) {
 1894                 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n", 
 1895                     __func__, csio->ccb_h.target_id);
 1896                 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 1897                 xpt_done(ccb);
 1898                 return;
 1899         }
 1900         if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
 1901                 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
 1902                     "supported %u\n", __func__, csio->ccb_h.target_id);
 1903                 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 1904                 xpt_done(ccb);
 1905                 return;
 1906         }
 1907         /*
 1908          * Sometimes, it is possible to get a command that is not "In
 1909          * Progress" and was actually aborted by the upper layer.  Check for
 1910          * this here and complete the command without error.
 1911          */
 1912         if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
 1913                 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
 1914                     "target %u\n", __func__, csio->ccb_h.target_id);
 1915                 xpt_done(ccb);
 1916                 return;
 1917         }
 1918         /*
 1919          * If devinfo is 0 this will be a volume.  In that case don't tell CAM
 1920          * that the volume has timed out.  We want volumes to be enumerated
 1921          * until they are deleted/removed, not just failed.
 1922          */
 1923         if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
 1924                 if (targ->devinfo == 0)
 1925                         mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1926                 else
 1927                         mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
 1928                 xpt_done(ccb);
 1929                 return;
 1930         }
 1931 
 1932         if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
 1933                 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
 1934                 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 1935                 xpt_done(ccb);
 1936                 return;
 1937         }
 1938 
 1939         /*
 1940          * If target has a reset in progress, freeze the devq and return.  The
 1941          * devq will be released when the TM reset is finished.
 1942          */
 1943         if (targ->flags & MPRSAS_TARGET_INRESET) {
 1944                 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
 1945                 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
 1946                     __func__, targ->tid);
 1947                 xpt_freeze_devq(ccb->ccb_h.path, 1);
 1948                 xpt_done(ccb);
 1949                 return;
 1950         }
 1951 
 1952         cm = mpr_alloc_command(sc);
 1953         if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
 1954                 if (cm != NULL) {
 1955                         mpr_free_command(sc, cm);
 1956                 }
 1957                 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
 1958                         xpt_freeze_simq(sassc->sim, 1);
 1959                         sassc->flags |= MPRSAS_QUEUE_FROZEN;
 1960                 }
 1961                 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 1962                 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
 1963                 xpt_done(ccb);
 1964                 return;
 1965         }
 1966 
 1967         /* For NVMe devices, issue the UNMAP command directly to the drive
 1968          * by building the equivalent native NVMe Dataset Management command.
 1969          */
 1970 #if __FreeBSD_version >= 1100103
 1971         scsi_opcode = scsiio_cdb_ptr(csio)[0];
 1972 #else
 1973         if (csio->ccb_h.flags & CAM_CDB_POINTER)
 1974                 scsi_opcode = csio->cdb_io.cdb_ptr[0];
 1975         else
 1976                 scsi_opcode = csio->cdb_io.cdb_bytes[0];
 1977 #endif
 1978         if (scsi_opcode == UNMAP &&
 1979             targ->is_nvme &&
 1980             (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
 1981                 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
 1982                 if (rc == 1) { /* return command to CAM with success status */
 1983                         mpr_free_command(sc, cm);
 1984                         mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1985                         xpt_done(ccb);
 1986                         return;
 1987                 } else if (!rc) /* Issued NVMe Encapsulated Request Message */
 1988                         return;
 1989         }
 1990 
 1991         req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
 1992         bzero(req, sizeof(*req));
 1993         req->DevHandle = htole16(targ->handle);
 1994         req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
 1995         req->MsgFlags = 0;
 1996         req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
 1997         req->SenseBufferLength = MPR_SENSE_LEN;
 1998         req->SGLFlags = 0;
 1999         req->ChainOffset = 0;
 2000         req->SGLOffset0 = 24;   /* 32bit word offset to the SGL */
 2001         req->SGLOffset1 = 0;
 2002         req->SGLOffset2 = 0;
 2003         req->SGLOffset3 = 0;
 2004         req->SkipCount = 0;
 2005         req->DataLength = htole32(csio->dxfer_len);
 2006         req->BidirectionalDataLength = 0;
 2007         req->IoFlags = htole16(csio->cdb_len);
 2008         req->EEDPFlags = 0;
 2009 
 2010         /* Note: BiDirectional transfers are not supported */
 2011         switch (csio->ccb_h.flags & CAM_DIR_MASK) {
 2012         case CAM_DIR_IN:
 2013                 mpi_control = MPI2_SCSIIO_CONTROL_READ;
 2014                 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
 2015                 break;
 2016         case CAM_DIR_OUT:
 2017                 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
 2018                 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
 2019                 break;
 2020         case CAM_DIR_NONE:
 2021         default:
 2022                 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
 2023                 break;
 2024         }
 2025 
 2026         if (csio->cdb_len == 32)
 2027                 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
 2028         /*
 2029          * It looks like the hardware doesn't require an explicit tag
 2030          * number for each transaction.  SAM Task Management not supported
 2031          * at the moment.
 2032          */
 2033         switch (csio->tag_action) {
 2034         case MSG_HEAD_OF_Q_TAG:
 2035                 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
 2036                 break;
 2037         case MSG_ORDERED_Q_TAG:
 2038                 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
 2039                 break;
 2040         case MSG_ACA_TASK:
 2041                 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
 2042                 break;
 2043         case CAM_TAG_ACTION_NONE:
 2044         case MSG_SIMPLE_Q_TAG:
 2045         default:
 2046                 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
 2047                 break;
 2048         }
 2049         mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
 2050         req->Control = htole32(mpi_control);
 2051 
 2052         if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
 2053                 mpr_free_command(sc, cm);
 2054                 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
 2055                 xpt_done(ccb);
 2056                 return;
 2057         }
 2058 
 2059         if (csio->ccb_h.flags & CAM_CDB_POINTER)
 2060                 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
 2061         else {
 2062                 KASSERT(csio->cdb_len <= IOCDBLEN,
 2063                     ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
 2064                     "is not set", csio->cdb_len));
 2065                 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
 2066         }
 2067         req->IoFlags = htole16(csio->cdb_len);
 2068 
 2069         /*
 2070          * Check if EEDP is supported and enabled.  If it is then check if the
 2071          * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
 2072          * is formatted for EEDP support.  If all of this is true, set CDB up
 2073          * for EEDP transfer.
 2074          */
 2075         eedp_flags = op_code_prot[req->CDB.CDB32[0]];
 2076         if (sc->eedp_enabled && eedp_flags) {
 2077                 SLIST_FOREACH(lun, &targ->luns, lun_link) {
 2078                         if (lun->lun_id == csio->ccb_h.target_lun) {
 2079                                 break;
 2080                         }
 2081                 }
 2082 
 2083                 if ((lun != NULL) && (lun->eedp_formatted)) {
 2084                         req->EEDPBlockSize = htole16(lun->eedp_block_size);
 2085                         eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
 2086                             MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
 2087                             MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
 2088                         if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
 2089                                 eedp_flags |=
 2090                                     MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
 2091                         }
 2092                         req->EEDPFlags = htole16(eedp_flags);
 2093 
 2094                         /*
 2095                          * If CDB less than 32, fill in Primary Ref Tag with
 2096                          * low 4 bytes of LBA.  If CDB is 32, tag stuff is
 2097                          * already there.  Also, set protection bit.  FreeBSD
 2098                          * currently does not support CDBs bigger than 16, but
 2099                          * the code doesn't hurt, and will be here for the
 2100                          * future.
 2101                          */
 2102                         if (csio->cdb_len != 32) {
 2103                                 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
 2104                                 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
 2105                                     PrimaryReferenceTag;
 2106                                 for (i = 0; i < 4; i++) {
 2107                                         *ref_tag_addr =
 2108                                             req->CDB.CDB32[lba_byte + i];
 2109                                         ref_tag_addr++;
 2110                                 }
 2111                                 req->CDB.EEDP32.PrimaryReferenceTag = 
 2112                                     htole32(req->
 2113                                     CDB.EEDP32.PrimaryReferenceTag);
 2114                                 req->CDB.EEDP32.PrimaryApplicationTagMask =
 2115                                     0xFFFF;
 2116                                 req->CDB.CDB32[1] =
 2117                                     (req->CDB.CDB32[1] & 0x1F) | 0x20;
 2118                         } else {
 2119                                 eedp_flags |=
 2120                                     MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
 2121                                 req->EEDPFlags = htole16(eedp_flags);
 2122                                 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
 2123                                     0x1F) | 0x20;
 2124                         }
 2125                 }
 2126         }
 2127 
 2128         cm->cm_length = csio->dxfer_len;
 2129         if (cm->cm_length != 0) {
 2130                 cm->cm_data = ccb;
 2131                 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
 2132         } else {
 2133                 cm->cm_data = NULL;
 2134         }
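              /*
               * The embedded SGL lives at 32-bit word offset 24 of the
               * request frame (SGLOffset0 above); cm_sglsize is the space
               * available for it, (32 - 24) words of 4 bytes each.
               */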
 2135         cm->cm_sge = &req->SGL;
 2136         cm->cm_sglsize = (32 - 24) * 4;
 2137         cm->cm_complete = mprsas_scsiio_complete;
 2138         cm->cm_complete_data = ccb;
 2139         cm->cm_targ = targ;
 2140         cm->cm_lun = csio->ccb_h.target_lun;
 2141         cm->cm_ccb = ccb;
 2142         /*
 2143          * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
 2144          * and set descriptor type.
 2145          */
 2146         if (targ->scsi_req_desc_type ==
 2147             MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
 2148                 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
 2149                 cm->cm_desc.FastPathSCSIIO.RequestFlags =
 2150                     MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
 2151                 if (!sc->atomic_desc_capable) {
 2152                         cm->cm_desc.FastPathSCSIIO.DevHandle =
 2153                             htole16(targ->handle);
 2154                 }
 2155         } else {
 2156                 cm->cm_desc.SCSIIO.RequestFlags =
 2157                     MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
 2158                 if (!sc->atomic_desc_capable)
 2159                         cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
 2160         }
 2161 
 2162 #if __FreeBSD_version >= 1000029
 2163         callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
 2164             mprsas_scsiio_timeout, cm, 0);
 2165 #else //__FreeBSD_version < 1000029
 2166         callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
 2167             mprsas_scsiio_timeout, cm);
 2168 #endif //__FreeBSD_version >= 1000029
 2169 
 2170         targ->issued++;
 2171         targ->outstanding++;
 2172         TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
 2173         ccb->ccb_h.status |= CAM_SIM_QUEUED;
 2174 
 2175         mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
 2176             __func__, cm, ccb, targ->outstanding);
 2177 
 2178         mpr_map_command(sc, cm);
 2179         return;
 2180 }
 2181 
 2182 static void
 2183 mpr_response_code(struct mpr_softc *sc, u8 response_code)
 2184 {
 2185         char *desc;
 2186  
 2187         switch (response_code) {
 2188         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
 2189                 desc = "task management request completed";
 2190                 break;
 2191         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
 2192                 desc = "invalid frame";
 2193                 break;
 2194         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
 2195                 desc = "task management request not supported";
 2196                 break;
 2197         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
 2198                 desc = "task management request failed";
 2199                 break;
 2200         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
 2201                 desc = "task management request succeeded";
 2202                 break;
 2203         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
 2204                 desc = "invalid lun";
 2205                 break;
 2206         case 0xA:
 2207                 desc = "overlapped tag attempted";
 2208                 break;
 2209         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
 2210                 desc = "task queued, however not sent to target";
 2211                 break;
 2212         default:
 2213                 desc = "unknown";
 2214                 break;
 2215         }
 2216         mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
 2217             desc);
 2218 }
 2219 
 2220 /**
 2221  * mpr_sc_failed_io_info - translate a non-successful SCSI_IO request
 2222  */
 2223 static void
 2224 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
 2225     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
 2226 {
 2227         u32 response_info;
 2228         u8 *response_bytes;
 2229         u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
 2230             MPI2_IOCSTATUS_MASK;
 2231         u8 scsi_state = mpi_reply->SCSIState;
 2232         u8 scsi_status = mpi_reply->SCSIStatus;
 2233         char *desc_ioc_state = NULL;
 2234         char *desc_scsi_status = NULL;
 2235         char *desc_scsi_state = sc->tmp_string;
 2236         u32 log_info = le32toh(mpi_reply->IOCLogInfo);
 2237         
 2238         if (log_info == 0x31170000)
 2239                 return;
 2240 
 2241         switch (ioc_status) {
 2242         case MPI2_IOCSTATUS_SUCCESS:
 2243                 desc_ioc_state = "success";
 2244                 break;
 2245         case MPI2_IOCSTATUS_INVALID_FUNCTION:
 2246                 desc_ioc_state = "invalid function";
 2247                 break;
 2248         case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
 2249                 desc_ioc_state = "scsi recovered error";
 2250                 break;
 2251         case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
 2252                 desc_ioc_state = "scsi invalid dev handle";
 2253                 break;
 2254         case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
 2255                 desc_ioc_state = "scsi device not there";
 2256                 break;
 2257         case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
 2258                 desc_ioc_state = "scsi data overrun";
 2259                 break;
 2260         case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
 2261                 desc_ioc_state = "scsi data underrun";
 2262                 break;
 2263         case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
 2264                 desc_ioc_state = "scsi io data error";
 2265                 break;
 2266         case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
 2267                 desc_ioc_state = "scsi protocol error";
 2268                 break;
 2269         case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
 2270                 desc_ioc_state = "scsi task terminated";
 2271                 break;
 2272         case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
 2273                 desc_ioc_state = "scsi residual mismatch";
 2274                 break;
 2275         case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
 2276                 desc_ioc_state = "scsi task mgmt failed";
 2277                 break;
 2278         case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
 2279                 desc_ioc_state = "scsi ioc terminated";
 2280                 break;
 2281         case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
 2282                 desc_ioc_state = "scsi ext terminated";
 2283                 break;
 2284         case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
 2285                 desc_ioc_state = "eedp guard error";
 2286                 break;
 2287         case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
 2288                 desc_ioc_state = "eedp ref tag error";
 2289                 break;
 2290         case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
 2291                 desc_ioc_state = "eedp app tag error";
 2292                 break;
 2293         case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
 2294                 desc_ioc_state = "insufficient power";
 2295                 break;
 2296         default:
 2297                 desc_ioc_state = "unknown";
 2298                 break;
 2299         }
 2300 
 2301         switch (scsi_status) {
 2302         case MPI2_SCSI_STATUS_GOOD:
 2303                 desc_scsi_status = "good";
 2304                 break;
 2305         case MPI2_SCSI_STATUS_CHECK_CONDITION:
 2306                 desc_scsi_status = "check condition";
 2307                 break;
 2308         case MPI2_SCSI_STATUS_CONDITION_MET:
 2309                 desc_scsi_status = "condition met";
 2310                 break;
 2311         case MPI2_SCSI_STATUS_BUSY:
 2312                 desc_scsi_status = "busy";
 2313                 break;
 2314         case MPI2_SCSI_STATUS_INTERMEDIATE:
 2315                 desc_scsi_status = "intermediate";
 2316                 break;
 2317         case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
 2318                 desc_scsi_status = "intermediate condmet";
 2319                 break;
 2320         case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
 2321                 desc_scsi_status = "reservation conflict";
 2322                 break;
 2323         case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
 2324                 desc_scsi_status = "command terminated";
 2325                 break;
 2326         case MPI2_SCSI_STATUS_TASK_SET_FULL:
 2327                 desc_scsi_status = "task set full";
 2328                 break;
 2329         case MPI2_SCSI_STATUS_ACA_ACTIVE:
 2330                 desc_scsi_status = "aca active";
 2331                 break;
 2332         case MPI2_SCSI_STATUS_TASK_ABORTED:
 2333                 desc_scsi_status = "task aborted";
 2334                 break;
 2335         default:
 2336                 desc_scsi_status = "unknown";
 2337                 break;
 2338         }
 2339 
 2340         desc_scsi_state[0] = '\0';
 2341         if (!scsi_state)
 2342                 desc_scsi_state = " ";
 2343         if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
 2344                 strcat(desc_scsi_state, "response info ");
 2345         if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
 2346                 strcat(desc_scsi_state, "state terminated ");
 2347         if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
 2348                 strcat(desc_scsi_state, "no status ");
 2349         if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
 2350                 strcat(desc_scsi_state, "autosense failed ");
 2351         if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
 2352                 strcat(desc_scsi_state, "autosense valid ");
 2353 
 2354         mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
 2355             le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
 2356         if (targ->encl_level_valid) {
 2357                 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
 2358                     "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
 2359                     targ->connector_name);
 2360         }
 2361         /*
 2362          * TO-DO: add more detail about underflow data here.
 2363          */
 2364         mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
 2365             "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
 2366             desc_scsi_state, scsi_state);
 2367 
 2368         if (sc->mpr_debug & MPR_XINFO &&
 2369             scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
 2370                 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
 2371                 scsi_sense_print(csio);
 2372                 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
 2373         }
 2374 
 2375         if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
 2376                 response_info = le32toh(mpi_reply->ResponseInfo);
 2377                 response_bytes = (u8 *)&response_info;
 2378                 mpr_response_code(sc, response_bytes[0]);
 2379         }
 2380 }
 2381 
 2382 /** mprsas_nvme_trans_status_code
 2383  *
 2384  * Convert Native NVMe command error status to
 2385  * equivalent SCSI error status.
 2386  *
 2387  * Returns appropriate scsi_status
 2388  */
 2389 static u8
 2390 mprsas_nvme_trans_status_code(struct nvme_status nvme_status,
 2391     struct mpr_command *cm)
 2392 {
 2393         u8 status = MPI2_SCSI_STATUS_GOOD;
 2394         int skey, asc, ascq;
 2395         union ccb *ccb = cm->cm_complete_data;
 2396         int returned_sense_len;
 2397 
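              /*
               * Default to CHECK CONDITION / ILLEGAL REQUEST; any NVMe
               * status not explicitly mapped in the switch below falls back
               * to these sense values.
               */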
 2398         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2399         skey = SSD_KEY_ILLEGAL_REQUEST;
 2400         asc = SCSI_ASC_NO_SENSE;
 2401         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2402 
 2403         switch (nvme_status.sct) {
 2404         case NVME_SCT_GENERIC:
 2405                 switch (nvme_status.sc) {
 2406                 case NVME_SC_SUCCESS:
 2407                         status = MPI2_SCSI_STATUS_GOOD;
 2408                         skey = SSD_KEY_NO_SENSE;
 2409                         asc = SCSI_ASC_NO_SENSE;
 2410                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2411                         break;
 2412                 case NVME_SC_INVALID_OPCODE:
 2413                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2414                         skey = SSD_KEY_ILLEGAL_REQUEST;
 2415                         asc = SCSI_ASC_ILLEGAL_COMMAND;
 2416                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2417                         break;
 2418                 case NVME_SC_INVALID_FIELD:
 2419                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2420                         skey = SSD_KEY_ILLEGAL_REQUEST;
 2421                         asc = SCSI_ASC_INVALID_CDB;
 2422                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2423                         break;
 2424                 case NVME_SC_DATA_TRANSFER_ERROR:
 2425                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2426                         skey = SSD_KEY_MEDIUM_ERROR;
 2427                         asc = SCSI_ASC_NO_SENSE;
 2428                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2429                         break;
 2430                 case NVME_SC_ABORTED_POWER_LOSS:
 2431                         status = MPI2_SCSI_STATUS_TASK_ABORTED;
 2432                         skey = SSD_KEY_ABORTED_COMMAND;
 2433                         asc = SCSI_ASC_WARNING;
 2434                         ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
 2435                         break;
 2436                 case NVME_SC_INTERNAL_DEVICE_ERROR:
 2437                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2438                         skey = SSD_KEY_HARDWARE_ERROR;
 2439                         asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
 2440                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2441                         break;
 2442                 case NVME_SC_ABORTED_BY_REQUEST:
 2443                 case NVME_SC_ABORTED_SQ_DELETION:
 2444                 case NVME_SC_ABORTED_FAILED_FUSED:
 2445                 case NVME_SC_ABORTED_MISSING_FUSED:
 2446                         status = MPI2_SCSI_STATUS_TASK_ABORTED;
 2447                         skey = SSD_KEY_ABORTED_COMMAND;
 2448                         asc = SCSI_ASC_NO_SENSE;
 2449                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2450                         break;
 2451                 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
 2452                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2453                         skey = SSD_KEY_ILLEGAL_REQUEST;
 2454                         asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
 2455                         ascq = SCSI_ASCQ_INVALID_LUN_ID;
 2456                         break;
 2457                 case NVME_SC_LBA_OUT_OF_RANGE:
 2458                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2459                         skey = SSD_KEY_ILLEGAL_REQUEST;
 2460                         asc = SCSI_ASC_ILLEGAL_BLOCK;
 2461                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2462                         break;
 2463                 case NVME_SC_CAPACITY_EXCEEDED:
 2464                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2465                         skey = SSD_KEY_MEDIUM_ERROR;
 2466                         asc = SCSI_ASC_NO_SENSE;
 2467                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2468                         break;
 2469                 case NVME_SC_NAMESPACE_NOT_READY:
 2470                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2471                         skey = SSD_KEY_NOT_READY; 
 2472                         asc = SCSI_ASC_LUN_NOT_READY;
 2473                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2474                         break;
 2475                 }
 2476                 break;
 2477         case NVME_SCT_COMMAND_SPECIFIC:
 2478                 switch (nvme_status.sc) {
 2479                 case NVME_SC_INVALID_FORMAT:
 2480                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2481                         skey = SSD_KEY_ILLEGAL_REQUEST;
 2482                         asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
 2483                         ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
 2484                         break;
 2485                 case NVME_SC_CONFLICTING_ATTRIBUTES:
 2486                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2487                         skey = SSD_KEY_ILLEGAL_REQUEST;
 2488                         asc = SCSI_ASC_INVALID_CDB;
 2489                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2490                         break;
 2491                 }
 2492                 break;
 2493         case NVME_SCT_MEDIA_ERROR:
 2494                 switch (nvme_status.sc) {
 2495                 case NVME_SC_WRITE_FAULTS:
 2496                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2497                         skey = SSD_KEY_MEDIUM_ERROR;
 2498                         asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
 2499                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2500                         break;
 2501                 case NVME_SC_UNRECOVERED_READ_ERROR:
 2502                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2503                         skey = SSD_KEY_MEDIUM_ERROR;
 2504                         asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
 2505                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2506                         break;
 2507                 case NVME_SC_GUARD_CHECK_ERROR:
 2508                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2509                         skey = SSD_KEY_MEDIUM_ERROR;
 2510                         asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
 2511                         ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
 2512                         break;
 2513                 case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
 2514                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2515                         skey = SSD_KEY_MEDIUM_ERROR;
 2516                         asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
 2517                         ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
 2518                         break;
 2519                 case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
 2520                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2521                         skey = SSD_KEY_MEDIUM_ERROR;
 2522                         asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
 2523                         ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
 2524                         break;
 2525                 case NVME_SC_COMPARE_FAILURE:
 2526                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2527                         skey = SSD_KEY_MISCOMPARE;
 2528                         asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
 2529                         ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
 2530                         break;
 2531                 case NVME_SC_ACCESS_DENIED:
 2532                         status = MPI2_SCSI_STATUS_CHECK_CONDITION;
 2533                         skey = SSD_KEY_ILLEGAL_REQUEST;
 2534                         asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
 2535                         ascq = SCSI_ASCQ_INVALID_LUN_ID;
 2536                         break;
 2537                 }
 2538                 break;
 2539         }
 2540         
 2541         returned_sense_len = sizeof(struct scsi_sense_data);
 2542         if (returned_sense_len < ccb->csio.sense_len)
 2543                 ccb->csio.sense_resid = ccb->csio.sense_len -
 2544                     returned_sense_len;
 2545         else
 2546                 ccb->csio.sense_resid = 0;
 2547 
 2548         scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
 2549             1, skey, asc, ascq, SSD_ELEM_NONE);
 2550         ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
 2551 
 2552         return status;
 2553 }
 2554 
 2555 /** mprsas_complete_nvme_unmap
 2556  *
 2557  * Complete a native NVMe command that was issued using an NVMe Encapsulated
 2558  * Request message and return the equivalent SCSI status.
 2559  */
 2560 static u8
 2561 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
 2562 {
 2563         Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
 2564         struct nvme_completion *nvme_completion = NULL;
 2565         u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
 2566 
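              /*
               * When the IOC reports an error response, the NVMe completion
               * queue entry is returned in the command's sense buffer; translate
               * its NVMe status field into an equivalent SCSI status.
               */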
 2567         mpi_reply = (Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
 2568         if (le16toh(mpi_reply->ErrorResponseCount)) {
 2569                 nvme_completion = (struct nvme_completion *)cm->cm_sense;
 2570                 scsi_status = mprsas_nvme_trans_status_code(
 2571                     nvme_completion->status, cm);
 2572         }
 2573         return scsi_status;
 2574 }
 2575 
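      /*
       * Complete a SCSI I/O command.  Called with the controller mutex held:
       * sync and unload any data DMA map, update the target's command
       * accounting, translate the IOC and SCSI status from the reply (if any)
       * into a CAM status, and finish the CCB with xpt_done().
       */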
 2576 static void
 2577 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
 2578 {
 2579         MPI2_SCSI_IO_REPLY *rep;
 2580         union ccb *ccb;
 2581         struct ccb_scsiio *csio;
 2582         struct mprsas_softc *sassc;
 2583         struct scsi_vpd_supported_page_list *vpd_list = NULL;
 2584         u8 *TLR_bits, TLR_on, *scsi_cdb;
 2585         int dir = 0, i;
 2586         u16 alloc_len;
 2587         struct mprsas_target *target;
 2588         target_id_t target_id;
 2589 
 2590         MPR_FUNCTRACE(sc);
 2591         mpr_dprint(sc, MPR_TRACE,
 2592             "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
 2593             cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
 2594             cm->cm_targ->outstanding);
 2595 
 2596         callout_stop(&cm->cm_callout);
 2597         mtx_assert(&sc->mpr_mtx, MA_OWNED);
 2598 
 2599         sassc = sc->sassc;
 2600         ccb = cm->cm_complete_data;
 2601         csio = &ccb->csio;
 2602         target_id = csio->ccb_h.target_id;
 2603         rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
 2604         /*
 2605          * XXX KDM if the chain allocation fails, does it matter if we do
 2606          * the sync and unload here?  It is simpler to do it in every case,
 2607          * assuming it doesn't cause problems.
 2608          */
 2609         if (cm->cm_data != NULL) {
 2610                 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
 2611                         dir = BUS_DMASYNC_POSTREAD;
 2612                 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
 2613                         dir = BUS_DMASYNC_POSTWRITE;
 2614                 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
 2615                 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
 2616         }
 2617 
 2618         cm->cm_targ->completed++;
 2619         cm->cm_targ->outstanding--;
 2620         TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
 2621         ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
 2622 
 2623         if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
 2624                 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
 2625                 if (cm->cm_reply != NULL)
 2626                         mprsas_log_command(cm, MPR_RECOVERY,
 2627                             "completed timedout cm %p ccb %p during recovery "
 2628                             "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
 2629                             le16toh(rep->IOCStatus), rep->SCSIStatus,
 2630                             rep->SCSIState, le32toh(rep->TransferCount));
 2631                 else
 2632                         mprsas_log_command(cm, MPR_RECOVERY,
 2633                             "completed timedout cm %p ccb %p during recovery\n",
 2634                             cm, cm->cm_ccb);
 2635         } else if (cm->cm_targ->tm != NULL) {
 2636                 if (cm->cm_reply != NULL)
 2637                         mprsas_log_command(cm, MPR_RECOVERY,
 2638                             "completed cm %p ccb %p during recovery "
 2639                             "ioc %x scsi %x state %x xfer %u\n",
 2640                             cm, cm->cm_ccb, le16toh(rep->IOCStatus),
 2641                             rep->SCSIStatus, rep->SCSIState,
 2642                             le32toh(rep->TransferCount));
 2643                 else
 2644                         mprsas_log_command(cm, MPR_RECOVERY,
 2645                             "completed cm %p ccb %p during recovery\n",
 2646                             cm, cm->cm_ccb);
 2647         } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
 2648                 mprsas_log_command(cm, MPR_RECOVERY,
 2649                     "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
 2650         }
 2651 
 2652         if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
 2653                 /*
 2654                  * We ran into an error after we tried to map the command,
 2655                  * so we're getting a callback without queueing the command
 2656                  * to the hardware.  So we set the status here, and it will
 2657                  * be retained below.  We'll go through the "fast path",
 2658                  * because there can be no reply when we haven't actually
 2659                  * gone out to the hardware.
 2660                  */
 2661                 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
 2662 
 2663                 /*
 2664                  * Currently the only error included in the mask is
 2665                  * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
 2666                  * chain frames.  We need to freeze the queue until we get
 2667                  * a command that completed without this error, which will
 2668                  * hopefully have some chain frames attached that we can
 2669                  * use.  If we wanted to get smarter about it, we would
 2670                  * only unfreeze the queue in this condition when we're
 2671                  * sure that we're getting some chain frames back.  That's
 2672                  * probably unnecessary.
 2673                  */
 2674                 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
 2675                         xpt_freeze_simq(sassc->sim, 1);
 2676                         sassc->flags |= MPRSAS_QUEUE_FROZEN;
 2677                         mpr_dprint(sc, MPR_XINFO, "Error sending command, "
 2678                             "freezing SIM queue\n");
 2679                 }
 2680         }
 2681 
 2682         /*
 2683          * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
 2684          * flag, and use it in a few places in the rest of this function for
 2685          * convenience. Use the macro if available.
 2686          */
 2687 #if __FreeBSD_version >= 1100103
 2688         scsi_cdb = scsiio_cdb_ptr(csio);
 2689 #else
 2690         if (csio->ccb_h.flags & CAM_CDB_POINTER)
 2691                 scsi_cdb = csio->cdb_io.cdb_ptr;
 2692         else
 2693                 scsi_cdb = csio->cdb_io.cdb_bytes;
 2694 #endif
 2695 
 2696         /*
 2697          * If this is a Start Stop Unit command and it was issued by the driver
 2698          * during shutdown, decrement the refcount to account for all of the
 2699          * commands that were sent.  All SSU commands should be completed before
 2700          * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
 2701          * is TRUE.
 2702          */
 2703         if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
 2704                 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
 2705                 sc->SSU_refcount--;
 2706         }
 2707 
 2708         /* Take the fast path to completion */
 2709         if (cm->cm_reply == NULL) {
 2710                 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
 2711                         if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
 2712                                 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
 2713                         else {
 2714                                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
 2715                                 csio->scsi_status = SCSI_STATUS_OK;
 2716                         }
 2717                         if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
 2718                                 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 2719                                 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
 2720                                 mpr_dprint(sc, MPR_XINFO,
 2721                                     "Unfreezing SIM queue\n");
 2722                         }
 2723                 } 
 2724 
 2725                 /*
 2726                  * There are two scenarios where the status won't be
 2727                  * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
 2728                  * set, the second is in the MPR_FLAGS_DIAGRESET above.
 2729                  */
 2730                 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
 2731                         /*
 2732                          * Freeze the dev queue so that commands are
 2733                          * executed in the correct order after error
 2734                          * recovery.
 2735                          */
 2736                         ccb->ccb_h.status |= CAM_DEV_QFRZN;
 2737                         xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
 2738                 }
 2739                 mpr_free_command(sc, cm);
 2740                 xpt_done(ccb);
 2741                 return;
 2742         }
 2743 
 2744         target = &sassc->targets[target_id];
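              /*
               * An UNMAP issued to an NVMe device went out as an NVMe
               * Encapsulated request, so the real completion status is in the
               * encapsulated NVMe reply; fold it into the SCSI status before
               * the normal IOCStatus handling below.
               */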
 2745         if (scsi_cdb[0] == UNMAP &&
 2746             target->is_nvme &&
 2747             (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
 2748                 rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
 2749                 csio->scsi_status = rep->SCSIStatus;
 2750         }
 2751 
 2752         mprsas_log_command(cm, MPR_XINFO,
 2753             "ioc %x scsi %x state %x xfer %u\n",
 2754             le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
 2755             le32toh(rep->TransferCount));
 2756 
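              /*
               * Map the completion status from the reply into a CAM status.
               * IOCStatus is little-endian on the wire, so byte-swap it and
               * mask it down to the status code before comparing.
               */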
 2757         switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
 2758         case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
 2759                 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
 2760                 /* FALLTHROUGH */
 2761         case MPI2_IOCSTATUS_SUCCESS:
 2762         case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
 2763                 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
 2764                     MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
 2765                         mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
 2766 
 2767                 /* Completion failed at the transport level. */
 2768                 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
 2769                     MPI2_SCSI_STATE_TERMINATED)) {
 2770                         mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2771                         break;
 2772                 }
 2773 
 2774                 /* In a modern packetized environment, an autosense failure
 2775                  * implies that there's not much else that can be done to
 2776                  * recover the command.
 2777                  */
 2778                 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
 2779                         mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
 2780                         break;
 2781                 }
 2782 
 2783                 /*
 2784                  * CAM doesn't care about SAS Response Info data, but if the
 2785                  * response reports an invalid frame, TLR should not be used,
 2786                  * so clear the TLR_bits for the target.
 2787                  */
 2788                 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
 2789                     ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
 2790                     == MPR_SCSI_RI_INVALID_FRAME)) {
 2791                         sc->mapping_table[target_id].TLR_bits =
 2792                             (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
 2793                 }
 2794 
 2795                 /*
 2796                  * Intentionally override the normal SCSI status reporting
 2797                  * for these two cases.  These are likely to happen in a
 2798                  * multi-initiator environment, and we want to make sure that
 2799                  * CAM retries these commands rather than fail them.
 2800                  */
 2801                 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
 2802                     (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
 2803                         mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
 2804                         break;
 2805                 }
 2806 
 2807                 /* Handle normal status and sense */
 2808                 csio->scsi_status = rep->SCSIStatus;
 2809                 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
 2810                         mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
 2811                 else
 2812                         mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
 2813 
 2814                 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
 2815                         int sense_len, returned_sense_len;
 2816 
 2817                         returned_sense_len = min(le32toh(rep->SenseCount),
 2818                             sizeof(struct scsi_sense_data));
 2819                         if (returned_sense_len < csio->sense_len)
 2820                                 csio->sense_resid = csio->sense_len -
 2821                                     returned_sense_len;
 2822                         else
 2823                                 csio->sense_resid = 0;
 2824 
 2825                         sense_len = min(returned_sense_len,
 2826                             csio->sense_len - csio->sense_resid);
 2827                         bzero(&csio->sense_data, sizeof(csio->sense_data));
 2828                         bcopy(cm->cm_sense, &csio->sense_data, sense_len);
 2829                         ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
 2830                 }
 2831 
 2832                 /*
 2833                  * Check if this is an INQUIRY command.  If it's a VPD inquiry,
 2834                  * and it's page code 0 (Supported Page List), and there is
 2835                  * inquiry data, and this is for a sequential access device, and
 2836                  * the device is an SSP target, and TLR is supported by the
 2837                  * controller, turn the TLR_bits value ON if page 0x90 is
 2838                  * supported.
 2839                  */
 2840                 if ((scsi_cdb[0] == INQUIRY) &&
 2841                     (scsi_cdb[1] & SI_EVPD) &&
 2842                     (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
 2843                     ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
 2844                     (csio->data_ptr != NULL) &&
 2845                     ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
 2846                     (sc->control_TLR) &&
 2847                     (sc->mapping_table[target_id].device_info &
 2848                     MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
 2849                         vpd_list = (struct scsi_vpd_supported_page_list *)
 2850                             csio->data_ptr;
 2851                         TLR_bits = &sc->mapping_table[target_id].TLR_bits;
 2852                         *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
 2853                         TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
 2854                         alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
 2855                         alloc_len -= csio->resid;
 2856                         for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
 2857                                 if (vpd_list->list[i] == 0x90) {
 2858                                         *TLR_bits = TLR_on;
 2859                                         break;
 2860                                 }
 2861                         }
 2862                 }
 2863 
 2864                 /*
 2865                  * If this is a SATA direct-access end device, mark it so that
 2866                  * a SCSI StartStopUnit command will be sent to it when the
 2867                  * driver is being shutdown.
 2868                  */
 2869                 if ((scsi_cdb[0] == INQUIRY) &&
 2870                     (csio->data_ptr != NULL) &&
 2871                     ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
 2872                     (sc->mapping_table[target_id].device_info &
 2873                     MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
 2874                     ((sc->mapping_table[target_id].device_info &
 2875                     MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
 2876                     MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
 2877                         target = &sassc->targets[target_id];
 2878                         target->supports_SSU = TRUE;
 2879                         mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
 2880                             target_id);
 2881                 }
 2882                 break;
 2883         case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
 2884         case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
 2885                 /*
 2886                  * If devinfo is 0 this will be a volume.  In that case don't
 2887                  * tell CAM that the volume is not there.  We want volumes to
 2888                  * be enumerated until they are deleted/removed, not just
 2889                  * failed.
 2890                  */
 2891                 if (cm->cm_targ->devinfo == 0)
 2892                         mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
 2893                 else
 2894                         mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 2895                 break;
 2896         case MPI2_IOCSTATUS_INVALID_SGL:
 2897                 mpr_print_scsiio_cmd(sc, cm);
 2898                 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
 2899                 break;
 2900         case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
 2901                 /*
 2902                  * This is one of the responses that comes back when an I/O
 2903                  * has been aborted.  If it is because of a timeout that we
 2904                  * initiated, just set the status to CAM_CMD_TIMEOUT.
 2905                  * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
 2906                  * command is the same (it gets retried, subject to the
 2907                  * retry counter), the only difference is what gets printed
 2908                  * on the console.
 2909                  */
 2910                 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
 2911                         mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
 2912                 else
 2913                         mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
 2914                 break;
 2915         case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
 2916                 /* resid is ignored for this condition */
 2917                 csio->resid = 0;
 2918                 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
 2919                 break;
 2920         case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
 2921         case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
 2922                 /*
 2923                  * These can sometimes be transient transport-related
 2924                  * errors, and sometimes persistent drive-related errors.
 2925                  * We used to retry these without decrementing the retry
 2926                  * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
 2927                  * we hit a persistent drive problem that returns one of
 2928                  * these error codes, we would retry indefinitely.  So,
 2929                  * return CAM_REQ_CMP_ERR so that we decrement the retry
 2930                  * count and avoid infinite retries.  We're taking the
 2931                  * potential risk of flagging false failures in the event
 2932                  * of a topology-related error (e.g. a SAS expander problem
 2933                  * causes a command addressed to a drive to fail), but
 2934                  * avoiding getting into an infinite retry loop.
 2935                  */
 2936                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2937                 mprsas_log_command(cm, MPR_INFO,
 2938                     "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
 2939                     le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
 2940                     rep->SCSIStatus, rep->SCSIState,
 2941                     le32toh(rep->TransferCount));
 2942                 break;
 2943         case MPI2_IOCSTATUS_INVALID_FUNCTION:
 2944         case MPI2_IOCSTATUS_INTERNAL_ERROR:
 2945         case MPI2_IOCSTATUS_INVALID_VPID:
 2946         case MPI2_IOCSTATUS_INVALID_FIELD:
 2947         case MPI2_IOCSTATUS_INVALID_STATE:
 2948         case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
 2949         case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
 2950         case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
 2951         case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
 2952         case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
 2953         default:
 2954                 mprsas_log_command(cm, MPR_XINFO,
 2955                     "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
 2956                     le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
 2957                     rep->SCSIStatus, rep->SCSIState,
 2958                     le32toh(rep->TransferCount));
 2959                 csio->resid = cm->cm_length;
 2960 
 2961                 if (scsi_cdb[0] == UNMAP &&
 2962                     target->is_nvme &&
 2963                     (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
 2964                         mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
 2965                 else
 2966                         mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2967 
 2968                 break;
 2969         }
 2970         
 2971         mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
 2972 
 2973         if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
 2974                 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 2975                 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
 2976                 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
 2977                     "queue\n");
 2978         }
 2979 
 2980         if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
 2981                 ccb->ccb_h.status |= CAM_DEV_QFRZN;
 2982                 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
 2983         }
 2984 
 2985         mpr_free_command(sc, cm);
 2986         xpt_done(ccb);
 2987 }
 2988 
 2989 #if __FreeBSD_version >= 900026
 2990 static void
 2991 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
 2992 {
 2993         MPI2_SMP_PASSTHROUGH_REPLY *rpl;
 2994         MPI2_SMP_PASSTHROUGH_REQUEST *req;
 2995         uint64_t sasaddr;
 2996         union ccb *ccb;
 2997 
 2998         ccb = cm->cm_complete_data;
 2999 
 3000         /*
 3001          * Currently there should be no way we can hit this case.  It only
 3002          * happens when we have a failure to allocate chain frames, and SMP
 3003          * commands require only two S/G elements, which fit within the
 3004          * standard request frame without needing chain frames.
 3005          */
 3006         if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
 3007                 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
 3008                     "request!\n", __func__, cm->cm_flags);
 3009                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 3010                 goto bailout;
 3011         }
 3012 
 3013         rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
 3014         if (rpl == NULL) {
 3015                 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
 3016                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 3017                 goto bailout;
 3018         }
 3019 
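              /*
               * Reassemble the 64-bit SAS address from the little-endian
               * Low/High words of the original request so that it can be
               * included in the log messages below.
               */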
 3020         req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
 3021         sasaddr = le32toh(req->SASAddress.Low);
 3022         sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
 3023 
 3024         if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
 3025             MPI2_IOCSTATUS_SUCCESS ||
 3026             rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
 3027                 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
 3028                     __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
 3029                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 3030                 goto bailout;
 3031         }
 3032 
 3033         mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
 3034             "completed successfully\n", __func__, (uintmax_t)sasaddr);
 3035 
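              /*
               * Byte 2 of the SMP response frame is the function result;
               * anything other than SMP FUNCTION ACCEPTED is reported to CAM
               * as an SMP status error.
               */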
 3036         if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
 3037                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
 3038         else
 3039                 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
 3040 
 3041 bailout:
 3042         /*
 3043          * We sync in both directions because we had DMAs in the S/G list
 3044          * in both directions.
 3045          */
 3046         bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
 3047                         BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3048         bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
 3049         mpr_free_command(sc, cm);
 3050         xpt_done(ccb);
 3051 }
 3052 
 3053 static void
 3054 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
 3055 {
 3056         struct mpr_command *cm;
 3057         uint8_t *request, *response;
 3058         MPI2_SMP_PASSTHROUGH_REQUEST *req;
 3059         struct mpr_softc *sc;
 3060         struct sglist *sg;
 3061         int error;
 3062 
 3063         sc = sassc->sc;
 3064         sg = NULL;
 3065         error = 0;
 3066 
 3067 #if (__FreeBSD_version >= 1000028) || \
 3068     ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
 3069         switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
 3070         case CAM_DATA_PADDR:
 3071         case CAM_DATA_SG_PADDR:
 3072                 /*
 3073                  * XXX We don't yet support physical addresses here.
 3074                  */
 3075                 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
 3076                     "supported\n", __func__);
 3077                 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
 3078                 xpt_done(ccb);
 3079                 return;
 3080         case CAM_DATA_SG:
 3081                 /*
 3082                  * The chip does not support more than one buffer for the
 3083                  * request or response.
 3084                  */
 3085                 if ((ccb->smpio.smp_request_sglist_cnt > 1)
 3086                     || (ccb->smpio.smp_response_sglist_cnt > 1)) {
 3087                         mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
 3088                             "response buffer segments not supported for SMP\n",
 3089                             __func__);
 3090                         mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
 3091                         xpt_done(ccb);
 3092                         return;
 3093                 }
 3094 
 3095                 /*
 3096                  * The CAM_SCATTER_VALID flag was originally implemented
 3097                  * for the XPT_SCSI_IO CCB, which only has one data pointer.
 3098                  * We have two.  So, just take that flag to mean that we
 3099                  * might have S/G lists, and look at the S/G segment count
 3100                  * to figure out whether that is the case for each individual
 3101                  * buffer.
 3102                  */
 3103                 if (ccb->smpio.smp_request_sglist_cnt != 0) {
 3104                         bus_dma_segment_t *req_sg;
 3105 
 3106                         req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
 3107                         request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
 3108                 } else
 3109                         request = ccb->smpio.smp_request;
 3110 
 3111                 if (ccb->smpio.smp_response_sglist_cnt != 0) {
 3112                         bus_dma_segment_t *rsp_sg;
 3113 
 3114                         rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
 3115                         response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
 3116                 } else
 3117                         response = ccb->smpio.smp_response;
 3118                 break;
 3119         case CAM_DATA_VADDR:
 3120                 request = ccb->smpio.smp_request;
 3121                 response = ccb->smpio.smp_response;
 3122                 break;
 3123         default:
 3124                 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
 3125                 xpt_done(ccb);
 3126                 return;
 3127         }
 3128 #else /* __FreeBSD_version < 1000028 */
 3129         /*
 3130          * XXX We don't yet support physical addresses here.
 3131          */
 3132         if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
 3133                 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
 3134                     "supported\n", __func__);
 3135                 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
 3136                 xpt_done(ccb);
 3137                 return;
 3138         }
 3139 
 3140         /*
 3141          * If the user wants to send an S/G list, check to make sure they
 3142          * have single buffers.
 3143          */
 3144         if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
 3145                 /*
 3146                  * The chip does not support more than one buffer for the
 3147                  * request or response.
 3148                  */
 3149                 if ((ccb->smpio.smp_request_sglist_cnt > 1)
 3150                   || (ccb->smpio.smp_response_sglist_cnt > 1)) {
 3151                         mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
 3152                             "response buffer segments not supported for SMP\n",
 3153                             __func__);
 3154                         mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
 3155                         xpt_done(ccb);
 3156                         return;
 3157                 }
 3158 
 3159                 /*
 3160                  * The CAM_SCATTER_VALID flag was originally implemented
 3161                  * for the XPT_SCSI_IO CCB, which only has one data pointer.
 3162                  * We have two.  So, just take that flag to mean that we
 3163                  * might have S/G lists, and look at the S/G segment count
 3164                  * to figure out whether that is the case for each individual
 3165                  * buffer.
 3166                  */
 3167                 if (ccb->smpio.smp_request_sglist_cnt != 0) {
 3168                         bus_dma_segment_t *req_sg;
 3169 
 3170                         req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
 3171                         request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
 3172                 } else
 3173                         request = ccb->smpio.smp_request;
 3174 
 3175                 if (ccb->smpio.smp_response_sglist_cnt != 0) {
 3176                         bus_dma_segment_t *rsp_sg;
 3177 
 3178                         rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
 3179                         response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
 3180                 } else
 3181                         response = ccb->smpio.smp_response;
 3182         } else {
 3183                 request = ccb->smpio.smp_request;
 3184                 response = ccb->smpio.smp_response;
 3185         }
 3186 #endif /* __FreeBSD_version < 1000028 */
 3187 
 3188         cm = mpr_alloc_command(sc);
 3189         if (cm == NULL) {
 3190                 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
 3191                     __func__);
 3192                 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
 3193                 xpt_done(ccb);
 3194                 return;
 3195         }
 3196 
 3197         req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
 3198         bzero(req, sizeof(*req));
 3199         req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
 3200 
 3201         /* Allow the chip to use any route to this SAS address. */
 3202         req->PhysicalPort = 0xff;
 3203 
 3204         req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
 3205         req->SGLFlags = 
 3206             MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
 3207 
 3208         mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
 3209             "%#jx\n", __func__, (uintmax_t)sasaddr);
 3210 
 3211         mpr_init_sge(cm, req, &req->SGL);
 3212 
 3213         /*
 3214          * Set up a uio to pass into mpr_map_command().  This allows us to
 3215          * do one map command, and one busdma call in there.
 3216          */
 3217         cm->cm_uio.uio_iov = cm->cm_iovec;
 3218         cm->cm_uio.uio_iovcnt = 2;
 3219         cm->cm_uio.uio_segflg = UIO_SYSSPACE;
 3220 
 3221         /*
 3222          * The read/write flag isn't used by busdma, but set it just in
 3223          * case.  This isn't exactly accurate, either, since we're going in
 3224          * both directions.
 3225          */
 3226         cm->cm_uio.uio_rw = UIO_WRITE;
 3227 
 3228         cm->cm_iovec[0].iov_base = request;
 3229         cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
 3230         cm->cm_iovec[1].iov_base = response;
 3231         cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
 3232 
 3233         cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
 3234                                cm->cm_iovec[1].iov_len;
 3235 
 3236         /*
 3237          * Trigger a warning message in mpr_data_cb() for the user if we
 3238          * wind up exceeding two S/G segments.  The chip expects one
 3239          * segment for the request and another for the response.
 3240          */
 3241         cm->cm_max_segs = 2;
 3242 
 3243         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 3244         cm->cm_complete = mprsas_smpio_complete;
 3245         cm->cm_complete_data = ccb;
 3246 
 3247         /*
 3248          * Tell the mapping code that we're using a uio, and that this is
 3249          * an SMP passthrough request.  There is a little special-case
 3250          * logic there (in mpr_data_cb()) to handle the bidirectional
 3251          * transfer.  
 3252          */
 3253         cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
 3254                         MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
 3255 
 3256         /* The chip data format is little endian. */
 3257         req->SASAddress.High = htole32(sasaddr >> 32);
 3258         req->SASAddress.Low = htole32(sasaddr);
 3259 
 3260         /*
 3261          * XXX Note that we don't have a timeout/abort mechanism here.
 3262          * From the manual, it looks like task management requests only
 3263          * work for SCSI IO and SATA passthrough requests.  We may need to
 3264          * have a mechanism to retry requests in the event of a chip reset
 3265          * at least.  Hopefully the chip will ensure that any errors short
 3266          * of that are relayed back to the driver.
 3267          */
 3268         error = mpr_map_command(sc, cm);
 3269         if ((error != 0) && (error != EINPROGRESS)) {
 3270                 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
 3271                     "mpr_map_command()\n", __func__, error);
 3272                 goto bailout_error;
 3273         }
 3274 
 3275         return;
 3276 
 3277 bailout_error:
 3278         mpr_free_command(sc, cm);
 3279         mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
 3280         xpt_done(ccb);
 3281         return;
 3282 }
 3283 
 3284 static void
 3285 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
 3286 {
 3287         struct mpr_softc *sc;
 3288         struct mprsas_target *targ;
 3289         uint64_t sasaddr = 0;
 3290 
 3291         sc = sassc->sc;
 3292 
 3293         /*
 3294          * Make sure the target exists.
 3295          */
 3296         KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
 3297             ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
 3298         targ = &sassc->targets[ccb->ccb_h.target_id];
 3299         if (targ->handle == 0x0) {
 3300                 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
 3301                     __func__, ccb->ccb_h.target_id);
 3302                 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
 3303                 xpt_done(ccb);
 3304                 return;
 3305         }
 3306 
 3307         /*
 3308          * If this device has an embedded SMP target, we'll talk to it
 3309          * directly.  Otherwise, we need to figure out what the expander's
 3310          * address is.
 3311          */
 3312         if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
 3313                 sasaddr = targ->sasaddr;
 3314 
 3315         /*
 3316          * If we don't have a SAS address for the expander yet, try
 3317          * grabbing it from the page 0x83 information cached in the
 3318          * transport layer for this target.  LSI expanders report the
 3319          * expander SAS address as the port-associated SAS address in
 3320          * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
 3321          * 0x83.
 3322          *
 3323          * XXX KDM disable this for now, but leave it commented out so that
 3324          * it is obvious that this is another possible way to get the SAS
 3325          * address.
 3326          *
 3327          * The parent handle method below is a little more reliable, and
 3328          * the other benefit is that it works for devices other than SES
 3329          * devices.  So you can send a SMP request to a da(4) device and it
 3330          * will get routed to the expander that device is attached to.
 3331          * (Assuming the da(4) device doesn't contain an SMP target...)
 3332          */
 3333 #if 0
 3334         if (sasaddr == 0)
 3335                 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
 3336 #endif
 3337 
 3338         /*
 3339          * If we still don't have a SAS address for the expander, look for
 3340          * the parent device of this device, which is probably the expander.
 3341          */
 3342         if (sasaddr == 0) {
 3343 #ifdef OLD_MPR_PROBE
 3344                 struct mprsas_target *parent_target;
 3345 #endif
 3346 
 3347                 if (targ->parent_handle == 0x0) {
 3348                         mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
 3349                             "a valid parent handle!\n", __func__, targ->handle);
 3350                         mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 3351                         goto bailout;
 3352                 }
 3353 #ifdef OLD_MPR_PROBE
 3354                 parent_target = mprsas_find_target_by_handle(sassc, 0,
 3355                     targ->parent_handle);
 3356 
 3357                 if (parent_target == NULL) {
 3358                         mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
 3359                             "a valid parent target!\n", __func__, targ->handle);
 3360                         mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 3361                         goto bailout;
 3362                 }
 3363 
 3364                 if ((parent_target->devinfo &
 3365                      MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
 3366                         mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
 3367                             "does not have an SMP target!\n", __func__,
 3368                             targ->handle, parent_target->handle);
 3369                         mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 3370                         goto bailout;
 3371                 }
 3372 
 3373                 sasaddr = parent_target->sasaddr;
 3374 #else /* OLD_MPR_PROBE */
 3375                 if ((targ->parent_devinfo &
 3376                      MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
 3377                         mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
 3378                             "does not have an SMP target!\n", __func__,
 3379                             targ->handle, targ->parent_handle);
 3380                         mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 3381                         goto bailout;
 3382 
 3383                 }
 3384                 if (targ->parent_sasaddr == 0x0) {
 3385                         mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
 3386                             "%d does not have a valid SAS address!\n", __func__,
 3387                             targ->handle, targ->parent_handle);
 3388                         mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 3389                         goto bailout;
 3390                 }
 3391 
 3392                 sasaddr = targ->parent_sasaddr;
 3393 #endif /* OLD_MPR_PROBE */
 3394 
 3395         }
 3396 
 3397         if (sasaddr == 0) {
 3398                 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
 3399                     "handle %d\n", __func__, targ->handle);
 3400                 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 3401                 goto bailout;
 3402         }
 3403         mprsas_send_smpcmd(sassc, ccb, sasaddr);
 3404 
 3405         return;
 3406 
 3407 bailout:
 3408         xpt_done(ccb);
 3409 
 3410 }
 3411 #endif //__FreeBSD_version >= 900026
 3412 
 3413 static void
 3414 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
 3415 {
 3416         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 3417         struct mpr_softc *sc;
 3418         struct mpr_command *tm;
 3419         struct mprsas_target *targ;
 3420 
 3421         MPR_FUNCTRACE(sassc->sc);
 3422         mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
 3423 
 3424         KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
 3425             "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
 3426         sc = sassc->sc;
 3427         tm = mpr_alloc_command(sc);
 3428         if (tm == NULL) {
 3429                 mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
 3430                     "mprsas_action_resetdev\n");
 3431                 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
 3432                 xpt_done(ccb);
 3433                 return;
 3434         }
 3435 
 3436         targ = &sassc->targets[ccb->ccb_h.target_id];
 3437         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 3438         req->DevHandle = htole16(targ->handle);
 3439         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 3440         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
 3441 
 3442         /* SAS Hard Link Reset / SATA Link Reset */
 3443         req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
 3444 
 3445         tm->cm_data = NULL;
 3446         tm->cm_desc.HighPriority.RequestFlags =
 3447             MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
 3448         tm->cm_complete = mprsas_resetdev_complete;
 3449         tm->cm_complete_data = ccb;
 3450 
 3451         mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
 3452             __func__, targ->tid);
 3453         tm->cm_targ = targ;
 3454         targ->flags |= MPRSAS_TARGET_INRESET;
 3455 
 3456         mpr_map_command(sc, tm);
 3457 }
 3458 
 3459 static void
 3460 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
 3461 {
 3462         MPI2_SCSI_TASK_MANAGE_REPLY *resp;
 3463         union ccb *ccb;
 3464 
 3465         MPR_FUNCTRACE(sc);
 3466         mtx_assert(&sc->mpr_mtx, MA_OWNED);
 3467 
 3468         resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 3469         ccb = tm->cm_complete_data;
 3470 
 3471         /*
 3472          * Currently there should be no way we can hit this case.  It only
 3473          * happens when we have a failure to allocate chain frames, and
 3474          * task management commands don't have S/G lists.
 3475          */
 3476         if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
 3477                 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 3478 
 3479                 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 3480 
 3481                 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
 3482                     "handle %#04x! This should not happen!\n", __func__,
 3483                     tm->cm_flags, req->DevHandle);
 3484                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 3485                 goto bailout;
 3486         }
 3487 
 3488         mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
 3489             __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
 3490 
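              /*
               * A response code of "task management request complete" means the
               * target reset succeeded; announce a bus device reset to CAM so
               * that peripheral drivers can react.  Otherwise fail the CCB.
               */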
 3491         if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
 3492                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
 3493                 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
 3494                     CAM_LUN_WILDCARD);
 3495         }
 3496         else
 3497                 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 3498 
 3499 bailout:
 3500 
 3501         mprsas_free_tm(sc, tm);
 3502         xpt_done(ccb);
 3503 }
 3504 
 3505 static void
 3506 mprsas_poll(struct cam_sim *sim)
 3507 {
 3508         struct mprsas_softc *sassc;
 3509 
 3510         sassc = cam_sim_softc(sim);
 3511 
 3512         if (sassc->sc->mpr_debug & MPR_TRACE) {
 3513                 /* frequent debug messages during a panic just slow
 3514                  * everything down too much.
 3515                  */
 3516                 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
 3517                     __func__);
 3518                 sassc->sc->mpr_debug &= ~MPR_TRACE;
 3519         }
 3520 
 3521         mpr_intr_locked(sassc->sc);
 3522 }
 3523 
 3524 static void
 3525 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
 3526     void *arg)
 3527 {
 3528         struct mpr_softc *sc;
 3529 
 3530         sc = (struct mpr_softc *)callback_arg;
 3531 
 3532         switch (code) {
 3533 #if (__FreeBSD_version >= 1000006) || \
 3534     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
 3535         case AC_ADVINFO_CHANGED: {
 3536                 struct mprsas_target *target;
 3537                 struct mprsas_softc *sassc;
 3538                 struct scsi_read_capacity_data_long rcap_buf;
 3539                 struct ccb_dev_advinfo cdai;
 3540                 struct mprsas_lun *lun;
 3541                 lun_id_t lunid;
 3542                 int found_lun;
 3543                 uintptr_t buftype;
 3544 
 3545                 buftype = (uintptr_t)arg;
 3546 
 3547                 found_lun = 0;
 3548                 sassc = sc->sassc;
 3549 
 3550                 /*
 3551                  * We're only interested in read capacity data changes.
 3552                  */
 3553                 if (buftype != CDAI_TYPE_RCAPLONG)
 3554                         break;
 3555 
 3556                 /*
 3557                  * See the comment in mpr_attach_sas() for a detailed
 3558                  * explanation.  In these versions of FreeBSD we register
 3559                  * for all events and filter out the events that don't
 3560                  * apply to us.
 3561                  */
 3562 #if (__FreeBSD_version < 1000703) || \
 3563     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
 3564                 if (xpt_path_path_id(path) != sassc->sim->path_id)
 3565                         break;
 3566 #endif
 3567 
 3568                 /*
 3569                  * We should have a handle for this, but check to make sure.
 3570                  */
 3571                 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
 3572                     ("Target %d out of bounds in mprsas_async\n",
 3573                     xpt_path_target_id(path)));
 3574                 target = &sassc->targets[xpt_path_target_id(path)];
 3575                 if (target->handle == 0)
 3576                         break;
 3577 
 3578                 lunid = xpt_path_lun_id(path);
 3579 
 3580                 SLIST_FOREACH(lun, &target->luns, lun_link) {
 3581                         if (lun->lun_id == lunid) {
 3582                                 found_lun = 1;
 3583                                 break;
 3584                         }
 3585                 }
 3586 
 3587                 if (found_lun == 0) {
 3588                         lun = malloc(sizeof(struct mprsas_lun), M_MPR,
 3589                             M_NOWAIT | M_ZERO);
 3590                         if (lun == NULL) {
 3591                                 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
 3592                                     "LUN for EEDP support.\n");
 3593                                 break;
 3594                         }
 3595                         lun->lun_id = lunid;
 3596                         SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
 3597                 }
 3598 
 3599                 bzero(&rcap_buf, sizeof(rcap_buf));
 3600                 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 3601                 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 3602                 cdai.ccb_h.flags = CAM_DIR_IN;
 3603                 cdai.buftype = CDAI_TYPE_RCAPLONG;
 3604 #if (__FreeBSD_version >= 1100061) || \
 3605     ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
 3606                 cdai.flags = CDAI_FLAG_NONE;
 3607 #else
 3608                 cdai.flags = 0;
 3609 #endif
 3610                 cdai.bufsiz = sizeof(rcap_buf);
 3611                 cdai.buf = (uint8_t *)&rcap_buf;
 3612                 xpt_action((union ccb *)&cdai);
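                      /*
                       * This CCB was issued internally rather than through a
                       * peripheral driver, so drop any device queue freeze
                       * here ourselves.
                       */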
 3613                 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
 3614                         cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
 3615 
 3616                 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
 3617                     && (rcap_buf.prot & SRC16_PROT_EN)) {
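                              /*
                               * Protection types 1 and 3 can be used for EEDP;
                               * type 2 (and anything unrecognized) is treated
                               * as not EEDP-formatted.
                               */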
 3618                         switch (rcap_buf.prot & SRC16_P_TYPE) {
 3619                         case SRC16_PTYPE_1:
 3620                         case SRC16_PTYPE_3:
 3621                                 lun->eedp_formatted = TRUE;
 3622                                 lun->eedp_block_size =
 3623                                     scsi_4btoul(rcap_buf.length);
 3624                                 break;
 3625                         case SRC16_PTYPE_2:
 3626                         default:
 3627                                 lun->eedp_formatted = FALSE;
 3628                                 lun->eedp_block_size = 0;
 3629                                 break;
 3630                         }
 3631                 } else {
 3632                         lun->eedp_formatted = FALSE;
 3633                         lun->eedp_block_size = 0;
 3634                 }
 3635                 break;
 3636         }
 3637 #endif
 3638         case AC_FOUND_DEVICE: {
 3639                 struct ccb_getdev *cgd;
 3640 
 3641                 /*
 3642                  * See the comment in mpr_attach_sas() for a detailed
 3643                  * explanation.  In these versions of FreeBSD we register
 3644                  * for all events and filter out the events that don't
 3645                  * apply to us.
 3646                  */
 3647 #if (__FreeBSD_version < 1000703) || \
 3648     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
 3649                 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
 3650                         break;
 3651 #endif
 3652 
 3653                 cgd = arg;
 3654 #if (__FreeBSD_version < 901503) || \
 3655     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
 3656                 mprsas_check_eedp(sc, path, cgd);
 3657 #endif
 3658                 break;
 3659         }
 3660         default:
 3661                 break;
 3662         }
 3663 }
 3664 
 3665 #if (__FreeBSD_version < 901503) || \
 3666     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
 3667 static void
 3668 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
 3669     struct ccb_getdev *cgd)
 3670 {
 3671         struct mprsas_softc *sassc = sc->sassc;
 3672         struct ccb_scsiio *csio;
 3673         struct scsi_read_capacity_16 *scsi_cmd;
 3674         struct scsi_read_capacity_eedp *rcap_buf;
 3675         path_id_t pathid;
 3676         target_id_t targetid;
 3677         lun_id_t lunid;
 3678         union ccb *ccb;
 3679         struct cam_path *local_path;
 3680         struct mprsas_target *target;
 3681         struct mprsas_lun *lun;
 3682         uint8_t found_lun;
 3683         char path_str[64];
 3684 
 3685         pathid = cam_sim_path(sassc->sim);
 3686         targetid = xpt_path_target_id(path);
 3687         lunid = xpt_path_lun_id(path);
 3688 
 3689         KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
 3690             "mprsas_check_eedp\n", targetid));
 3691         target = &sassc->targets[targetid];
 3692         if (target->handle == 0x0)
 3693                 return;
 3694 
 3695         /*
 3696          * Determine if the device is EEDP capable.
 3697          *
 3698          * If the PROTECT flag is set in the inquiry data, the device
 3699          * supports protection information and must support the 16 byte
 3700          * read capacity command; otherwise return without sending it.
 3701          */
 3702         if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
 3703                 return;
 3704 
 3705         /*
 3706          * Issue a READ CAPACITY 16 command.  This info is used to determine if
 3707          * the LUN is formatted for EEDP support.
 3708          */
 3709         ccb = xpt_alloc_ccb_nowait();
 3710         if (ccb == NULL) {
 3711                 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
 3712                     "support.\n");
 3713                 return;
 3714         }
 3715 
 3716         if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
 3717             CAM_REQ_CMP) {
 3718                 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
 3719                     "support.\n");
 3720                 xpt_free_ccb(ccb);
 3721                 return;
 3722         }
 3723 
 3724         /*
 3725          * If LUN is already in list, don't create a new one.
 3726          */
 3727         found_lun = FALSE;
 3728         SLIST_FOREACH(lun, &target->luns, lun_link) {
 3729                 if (lun->lun_id == lunid) {
 3730                         found_lun = TRUE;
 3731                         break;
 3732                 }
 3733         }
 3734         if (!found_lun) {
 3735                 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
 3736                     M_NOWAIT | M_ZERO);
 3737                 if (lun == NULL) {
 3738                         mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
 3739                             "EEDP support.\n");
 3740                         xpt_free_path(local_path);
 3741                         xpt_free_ccb(ccb);
 3742                         return;
 3743                 }
 3744                 lun->lun_id = lunid;
 3745                 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
 3746         }
 3747 
 3748         xpt_path_string(local_path, path_str, sizeof(path_str));
 3749         mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
 3750             path_str, target->handle);
 3751 
 3752         /*
 3753          * Issue a READ CAPACITY 16 command for the LUN.  The
 3754          * mprsas_read_cap_done function will load the read cap info into the
 3755          * LUN struct.
 3756          */
 3757         rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
 3758             M_NOWAIT | M_ZERO);
 3759         if (rcap_buf == NULL) {
 3760                 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
 3761                     "buffer for EEDP support.\n");
  3762                 xpt_free_path(local_path);
 3763                 xpt_free_ccb(ccb);
 3764                 return;
 3765         }
 3766         xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
 3767         csio = &ccb->csio;
 3768         csio->ccb_h.func_code = XPT_SCSI_IO;
 3769         csio->ccb_h.flags = CAM_DIR_IN;
 3770         csio->ccb_h.retry_count = 4;    
 3771         csio->ccb_h.cbfcnp = mprsas_read_cap_done;
 3772         csio->ccb_h.timeout = 60000;
 3773         csio->data_ptr = (uint8_t *)rcap_buf;
 3774         csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
 3775         csio->sense_len = MPR_SENSE_LEN;
 3776         csio->cdb_len = sizeof(*scsi_cmd);
 3777         csio->tag_action = MSG_SIMPLE_Q_TAG;
 3778 
 3779         scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
 3780         bzero(scsi_cmd, sizeof(*scsi_cmd));
 3781         scsi_cmd->opcode = 0x9E;
 3782         scsi_cmd->service_action = SRC16_SERVICE_ACTION;
 3783         ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
 3784 
 3785         ccb->ccb_h.ppriv_ptr1 = sassc;
 3786         xpt_action(ccb);
 3787 }
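
      /*
       * For reference, the CDB built above is a SERVICE ACTION IN(16) command.
       * A rough sketch of its 16-byte layout, assuming the usual SBC
       * READ CAPACITY(16) definition:
       *
       *   byte  0       0x9E   SERVICE ACTION IN(16) opcode
       *   byte  1       0x10   service action: READ CAPACITY(16)
       *                        (SRC16_SERVICE_ACTION)
       *   bytes 2-9     0      logical block address, unused here
       *   bytes 10-13   N      allocation length, big-endian; only byte 13 is
       *                        written above, which works because the response
       *                        buffer is smaller than 256 bytes
       *   bytes 14-15   0      PMI bit / CONTROL
       *
       * scsi_ulto4b() could be used to fill the whole allocation-length field
       * instead of poking byte 13 directly.
       */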
 3788 
 3789 static void
 3790 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
 3791 {
 3792         struct mprsas_softc *sassc;
 3793         struct mprsas_target *target;
 3794         struct mprsas_lun *lun;
 3795         struct scsi_read_capacity_eedp *rcap_buf;
 3796 
 3797         if (done_ccb == NULL)
 3798                 return;
 3799         
  3800         /*
  3801          * The driver must release the devq itself when the SCSI command
  3802          * was generated internally by the driver.  This is currently the
  3803          * only place where the driver issues a SCSI command internally;
  3804          * if more internally generated commands are added in the future,
  3805          * they must release the devq as well, since such commands never
  3806          * return to cam_periph.
  3807          */
  3808         if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 3809                 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
 3810                 xpt_release_devq(done_ccb->ccb_h.path,
 3811                                 /*count*/ 1, /*run_queue*/TRUE);
 3812         }
 3813 
 3814         rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
 3815 
 3816         /*
 3817          * Get the LUN ID for the path and look it up in the LUN list for the
 3818          * target.
 3819          */
 3820         sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
 3821         KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
 3822             "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
 3823         target = &sassc->targets[done_ccb->ccb_h.target_id];
 3824         SLIST_FOREACH(lun, &target->luns, lun_link) {
 3825                 if (lun->lun_id != done_ccb->ccb_h.target_lun)
 3826                         continue;
 3827 
 3828                 /*
 3829                  * Got the LUN in the target's LUN list.  Fill it in with EEDP
 3830                  * info. If the READ CAP 16 command had some SCSI error (common
 3831                  * if command is not supported), mark the lun as not supporting
 3832                  * EEDP and set the block size to 0.
 3833                  */
 3834                 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
 3835                     (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
 3836                         lun->eedp_formatted = FALSE;
 3837                         lun->eedp_block_size = 0;
 3838                         break;
 3839                 }
 3840 
 3841                 if (rcap_buf->protect & 0x01) {
 3842                         mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
 3843                             "%d is formatted for EEDP support.\n",
 3844                             done_ccb->ccb_h.target_lun,
 3845                             done_ccb->ccb_h.target_id);
 3846                         lun->eedp_formatted = TRUE;
 3847                         lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
 3848                 }
 3849                 break;
 3850         }
 3851 
  3852         /* Finished with this CCB and path. */
 3853         free(rcap_buf, M_MPR);
 3854         xpt_free_path(done_ccb->ccb_h.path);
 3855         xpt_free_ccb(done_ccb);
 3856 }
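
      /*
       * For reference, a sketch of how the protect byte in the READ
       * CAPACITY(16) parameter data decodes (per SBC-3, using the fields of
       * the driver's struct scsi_read_capacity_eedp referenced above):
       *
       *   prot_en = rcap_buf->protect & 0x01;          LUN formatted with PI
       *   p_type  = (rcap_buf->protect >> 1) & 0x07;   0/1/2 => Type 1/2/3
       *                                                protection when prot_en
       *
       * The code above only tests PROT_EN, so any protection type counts as
       * "EEDP formatted", and scsi_4btoul() converts the big-endian 4-byte
       * block length into eedp_block_size.
       */
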
 3857 #endif /* (__FreeBSD_version < 901503) || \
 3858           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
 3859 
 3860 void
 3861 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
 3862     struct mprsas_target *target, lun_id_t lun_id)
 3863 {
 3864         union ccb *ccb;
 3865         path_id_t path_id;
 3866 
 3867         /*
 3868          * Set the INRESET flag for this target so that no I/O will be sent to
 3869          * the target until the reset has completed.  If an I/O request does
 3870          * happen, the devq will be frozen.  The CCB holds the path which is
 3871          * used to release the devq.  The devq is released and the CCB is freed
 3872          * when the TM completes.
 3873          */
 3874         ccb = xpt_alloc_ccb_nowait();
 3875         if (ccb) {
 3876                 path_id = cam_sim_path(sc->sassc->sim);
 3877                 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
 3878                     target->tid, lun_id) != CAM_REQ_CMP) {
 3879                         xpt_free_ccb(ccb);
 3880                 } else {
 3881                         tm->cm_ccb = ccb;
 3882                         tm->cm_targ = target;
 3883                         target->flags |= MPRSAS_TARGET_INRESET;
 3884                 }
 3885         }
 3886 }
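
      /*
       * A rough sketch of how a task-management path is expected to use this
       * helper; everything other than mprsas_prepare_for_tm() below is an
       * abbreviation of the reset/abort code earlier in this file, not a
       * verbatim copy:
       *
       *   cm = <allocate a TM command>;
       *   req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
       *   req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
       *   req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET;
       *   mprsas_prepare_for_tm(sc, cm, target, lun_id);   freeze I/O first
       *   <fill in DevHandle/LUN and queue the TM to the IOC>;
       *
       * When the TM completes, its completion handler is responsible for
       * clearing MPRSAS_TARGET_INRESET, releasing the frozen devq through the
       * path held in cm_ccb, and freeing the CCB, as the comment above notes.
       */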
 3887 
 3888 int
 3889 mprsas_startup(struct mpr_softc *sc)
 3890 {
 3891         /*
 3892          * Send the port enable message and set the wait_for_port_enable flag.
 3893          * This flag helps to keep the simq frozen until all discovery events
 3894          * are processed.
 3895          */
 3896         sc->wait_for_port_enable = 1;
 3897         mprsas_send_portenable(sc);
 3898         return (0);
 3899 }
 3900 
 3901 static int
 3902 mprsas_send_portenable(struct mpr_softc *sc)
 3903 {
 3904         MPI2_PORT_ENABLE_REQUEST *request;
 3905         struct mpr_command *cm;
 3906 
 3907         MPR_FUNCTRACE(sc);
 3908 
 3909         if ((cm = mpr_alloc_command(sc)) == NULL)
 3910                 return (EBUSY);
 3911         request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
 3912         request->Function = MPI2_FUNCTION_PORT_ENABLE;
 3913         request->MsgFlags = 0;
 3914         request->VP_ID = 0;
 3915         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 3916         cm->cm_complete = mprsas_portenable_complete;
 3917         cm->cm_data = NULL;
 3918         cm->cm_sge = NULL;
 3919 
 3920         mpr_map_command(sc, cm);
  3921         mpr_dprint(sc, MPR_XINFO,
  3922             "%s finished cm %p req %p complete %p\n", __func__,
  3923             cm, cm->cm_req, cm->cm_complete);
 3924         return (0);
 3925 }
 3926 
 3927 static void
 3928 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
 3929 {
 3930         MPI2_PORT_ENABLE_REPLY *reply;
 3931         struct mprsas_softc *sassc;
 3932 
 3933         MPR_FUNCTRACE(sc);
 3934         sassc = sc->sassc;
 3935 
 3936         /*
 3937          * Currently there should be no way we can hit this case.  It only
 3938          * happens when we have a failure to allocate chain frames, and
 3939          * port enable commands don't have S/G lists.
 3940          */
 3941         if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
 3942                 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
 3943                     "This should not happen!\n", __func__, cm->cm_flags);
 3944         }
 3945 
 3946         reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
 3947         if (reply == NULL)
 3948                 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
  3949         else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
  3950             MPI2_IOCSTATUS_SUCCESS)
 3951                 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
 3952 
 3953         mpr_free_command(sc, cm);
 3954         if (sc->mpr_ich.ich_arg != NULL) {
 3955                 mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
 3956                 config_intrhook_disestablish(&sc->mpr_ich);
 3957                 sc->mpr_ich.ich_arg = NULL;
 3958         }
 3959 
 3960         /*
 3961          * Done waiting for port enable to complete.  Decrement the refcount.
 3962          * If refcount is 0, discovery is complete and a rescan of the bus can
 3963          * take place.
 3964          */
 3965         sc->wait_for_port_enable = 0;
 3966         sc->port_enable_complete = 1;
 3967         wakeup(&sc->port_enable_complete);
 3968         mprsas_startup_decrement(sassc);
 3969 }
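
      /*
       * A brief sketch of the startup reference-count pattern referred to
       * above; mprsas_startup_increment()/mprsas_startup_decrement() (earlier
       * in this file) coordinate freezing and releasing the SIM queue:
       *
       *   mprsas_startup_increment(sassc);   a reference is held while port
       *                                      enable is outstanding and for
       *                                      each discovery event still being
       *                                      processed
       *   ...
       *   mprsas_startup_decrement(sassc);   dropping the last reference
       *                                      releases the SIM queue so the
       *                                      initial bus rescan can take place
       */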
 3970 
 3971 int
 3972 mprsas_check_id(struct mprsas_softc *sassc, int id)
 3973 {
 3974         struct mpr_softc *sc = sassc->sc;
 3975         char *ids;
 3976         char *name;
 3977 
 3978         ids = &sc->exclude_ids[0];
  3979         while ((name = strsep(&ids, ",")) != NULL) {
 3980                 if (name[0] == '\0')
 3981                         continue;
 3982                 if (strtol(name, NULL, 0) == (long)id)
 3983                         return (1);
 3984         }
 3985 
 3986         return (0);
 3987 }
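
      /*
       * Usage sketch: if sc->exclude_ids holds the string "3,7", a call with
       * id 3 or id 7 returns 1 and other IDs return 0, so a caller can skip
       * those targets during discovery.  Note that strsep(3) overwrites each
       * ',' it crosses with a NUL byte, so after one pass sc->exclude_ids
       * retains only its first entry; if this routine is called once per
       * discovered target, parsing a local copy of the string would be more
       * robust.
       */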
 3988 
 3989 void
 3990 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
 3991 {
 3992         struct mprsas_softc *sassc;
 3993         struct mprsas_lun *lun, *lun_tmp;
 3994         struct mprsas_target *targ;
 3995         int i;
 3996 
 3997         sassc = sc->sassc;
 3998         /*
 3999          * The number of targets is based on IOC Facts, so free all of
 4000          * the allocated LUNs for each target and then the target buffer
 4001          * itself.
 4002          */
  4003         for (i = 0; i < maxtargets; i++) {
 4004                 targ = &sassc->targets[i];
 4005                 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
 4006                         free(lun, M_MPR);
 4007                 }
 4008         }
 4009         free(sassc->targets, M_MPR);
 4010 
  4011         sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
  4012             M_MPR, M_WAITOK|M_ZERO);
  4013         /*
  4014          * M_WAITOK allocations sleep until memory is available and never
  4015          * return NULL, so no failure check is needed after this malloc().
  4016          */
 4017 }
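
      /*
       * The _SAFE list iterator above matters: SLIST_FOREACH_SAFE() saves the
       * next element in lun_tmp before the loop body runs, so freeing the
       * current lun does not break the traversal.  A sketch of the difference:
       *
       *   SLIST_FOREACH(lun, &targ->luns, lun_link)
       *           free(lun, M_MPR);            broken: the next pointer would
       *                                        be read from freed memory
       *
       *   SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp)
       *           free(lun, M_MPR);            safe: lun_tmp already holds
       *                                        the successor
       */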
