FreeBSD/Linux Kernel Cross Reference
sys/dev/mps/mps_sas.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2009 Yahoo! Inc.
    5  * Copyright (c) 2011-2015 LSI Corp.
    6  * Copyright (c) 2013-2015 Avago Technologies
    7  * All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  *
   30  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
   31  *
   32  * $FreeBSD: releng/12.0/sys/dev/mps/mps_sas.c 331422 2018-03-23 13:52:26Z ken $
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/12.0/sys/dev/mps/mps_sas.c 331422 2018-03-23 13:52:26Z ken $");
   37 
   38 /* Communications core for Avago Technologies (LSI) MPT2 */
   39 
   40 /* TODO Move headers to mpsvar */
   41 #include <sys/types.h>
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/kernel.h>
   45 #include <sys/selinfo.h>
   46 #include <sys/module.h>
   47 #include <sys/bus.h>
   48 #include <sys/conf.h>
   49 #include <sys/bio.h>
   50 #include <sys/malloc.h>
   51 #include <sys/uio.h>
   52 #include <sys/sysctl.h>
   53 #include <sys/endian.h>
   54 #include <sys/queue.h>
   55 #include <sys/kthread.h>
   56 #include <sys/taskqueue.h>
   57 #include <sys/sbuf.h>
   58 
   59 #include <machine/bus.h>
   60 #include <machine/resource.h>
   61 #include <sys/rman.h>
   62 
   63 #include <machine/stdarg.h>
   64 
   65 #include <cam/cam.h>
   66 #include <cam/cam_ccb.h>
   67 #include <cam/cam_xpt.h>
   68 #include <cam/cam_debug.h>
   69 #include <cam/cam_sim.h>
   70 #include <cam/cam_xpt_sim.h>
   71 #include <cam/cam_xpt_periph.h>
   72 #include <cam/cam_periph.h>
   73 #include <cam/scsi/scsi_all.h>
   74 #include <cam/scsi/scsi_message.h>
   75 #if __FreeBSD_version >= 900026
   76 #include <cam/scsi/smp_all.h>
   77 #endif
   78 
   79 #include <dev/mps/mpi/mpi2_type.h>
   80 #include <dev/mps/mpi/mpi2.h>
   81 #include <dev/mps/mpi/mpi2_ioc.h>
   82 #include <dev/mps/mpi/mpi2_sas.h>
   83 #include <dev/mps/mpi/mpi2_cnfg.h>
   84 #include <dev/mps/mpi/mpi2_init.h>
   85 #include <dev/mps/mpi/mpi2_tool.h>
   86 #include <dev/mps/mps_ioctl.h>
   87 #include <dev/mps/mpsvar.h>
   88 #include <dev/mps/mps_table.h>
   89 #include <dev/mps/mps_sas.h>
   90 
   91 #define MPSSAS_DISCOVERY_TIMEOUT        20
   92 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS   10 /* 200 seconds */
   93 
   94 /*
   95  * static array to check SCSI OpCode for EEDP protection bits
   96  */
   97 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
   98 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
   99 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
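       /*
        * Indexed by SCSI opcode: READ opcodes (0x28, 0x88, 0xa8) get the
        * check/remove EEDP flags, while WRITE, WRITE SAME, WRITE AND VERIFY
        * and VERIFY opcodes get the insert flags.
        */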
  100 static uint8_t op_code_prot[256] = {
  101         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  102         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  103         0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
  104         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  105         0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  106         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  107         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  108         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  109         0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
  110         0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  111         0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
  112         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  113         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  114         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  115         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  116         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  117 };
  118 
  119 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
  120 
  121 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
  122 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
  123 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
  124 static void mpssas_poll(struct cam_sim *sim);
  125 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
  126     struct mps_command *cm);
  127 static void mpssas_scsiio_timeout(void *data);
  128 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
  129 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
  130     struct mps_command *cm, union ccb *ccb);
  131 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
  132 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
  133 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
  134 #if __FreeBSD_version >= 900026
  135 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
  136 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
  137                                uint64_t sasaddr);
  138 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
  139 #endif //FreeBSD_version >= 900026
  140 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
  141 static void mpssas_async(void *callback_arg, uint32_t code,
  142                          struct cam_path *path, void *arg);
  143 #if (__FreeBSD_version < 901503) || \
  144     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
  145 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
  146                               struct ccb_getdev *cgd);
  147 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
  148 #endif
  149 static int mpssas_send_portenable(struct mps_softc *sc);
  150 static void mpssas_portenable_complete(struct mps_softc *sc,
  151     struct mps_command *cm);
  152 
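       /*
        * Look up a target by its firmware device handle, scanning the target
        * array from index 'start'.  Returns NULL if no target matches.
        */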
  153 struct mpssas_target *
  154 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
  155 {
  156         struct mpssas_target *target;
  157         int i;
  158 
  159         for (i = start; i < sassc->maxtargets; i++) {
  160                 target = &sassc->targets[i];
  161                 if (target->handle == handle)
  162                         return (target);
  163         }
  164 
  165         return (NULL);
  166 }
  167 
   168 /* We need to freeze the simq during attach and diag reset, to avoid failing
  169  * commands before device handles have been found by discovery.  Since
  170  * discovery involves reading config pages and possibly sending commands,
  171  * discovery actions may continue even after we receive the end of discovery
  172  * event, so refcount discovery actions instead of assuming we can unfreeze
  173  * the simq when we get the event.
  174  */
  175 void
  176 mpssas_startup_increment(struct mpssas_softc *sassc)
  177 {
  178         MPS_FUNCTRACE(sassc->sc);
  179 
  180         if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
  181                 if (sassc->startup_refcount++ == 0) {
  182                         /* just starting, freeze the simq */
  183                         mps_dprint(sassc->sc, MPS_INIT,
  184                             "%s freezing simq\n", __func__);
  185 #if __FreeBSD_version >= 1000039
  186                         xpt_hold_boot();
  187 #endif
  188                         xpt_freeze_simq(sassc->sim, 1);
  189                 }
  190                 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
  191                     sassc->startup_refcount);
  192         }
  193 }
  194 
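       /*
        * Release the SIM queue if it was frozen (MPSSAS_QUEUE_FROZEN) so that
        * CAM can resume issuing commands, e.g. after a controller reinit.
        */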
  195 void
  196 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
  197 {
  198         if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
  199                 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
  200                 xpt_release_simq(sassc->sim, 1);
  201                 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
  202         }
  203 }
  204 
  205 void
  206 mpssas_startup_decrement(struct mpssas_softc *sassc)
  207 {
  208         MPS_FUNCTRACE(sassc->sc);
  209 
  210         if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
  211                 if (--sassc->startup_refcount == 0) {
  212                         /* finished all discovery-related actions, release
  213                          * the simq and rescan for the latest topology.
  214                          */
  215                         mps_dprint(sassc->sc, MPS_INIT,
  216                             "%s releasing simq\n", __func__);
  217                         sassc->flags &= ~MPSSAS_IN_STARTUP;
  218                         xpt_release_simq(sassc->sim, 1);
  219 #if __FreeBSD_version >= 1000039
  220                         xpt_release_boot();
  221 #else
  222                         mpssas_rescan_target(sassc->sc, NULL);
  223 #endif
  224                 }
  225                 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
  226                     sassc->startup_refcount);
  227         }
  228 }
  229 
  230 /* The firmware requires us to stop sending commands when we're doing task
  231  * management, so refcount the TMs and keep the simq frozen when any are in
  232  * use.
  233  */
  234 struct mps_command *
  235 mpssas_alloc_tm(struct mps_softc *sc)
  236 {
  237         struct mps_command *tm;
  238 
  239         tm = mps_alloc_high_priority_command(sc);
  240         return tm;
  241 }
  242 
  243 void
  244 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
  245 {
  246         int target_id = 0xFFFFFFFF;
  247  
  248         if (tm == NULL)
  249                 return;
  250 
  251         /*
  252          * For TM's the devq is frozen for the device.  Unfreeze it here and
  253          * free the resources used for freezing the devq.  Must clear the
  254          * INRESET flag as well or scsi I/O will not work.
  255          */
  256         if (tm->cm_targ != NULL) {
  257                 tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
  258                 target_id = tm->cm_targ->tid;
  259         }
  260         if (tm->cm_ccb) {
  261                 mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
  262                     target_id);
  263                 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
  264                 xpt_free_path(tm->cm_ccb->ccb_h.path);
  265                 xpt_free_ccb(tm->cm_ccb);
  266         }
  267 
  268         mps_free_high_priority_command(sc, tm);
  269 }
  270 
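       /*
        * Ask CAM to rescan a single target, or the entire bus when targ is
        * NULL.  The rescan CCB is allocated here and completed by CAM.
        */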
  271 void
  272 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
  273 {
  274         struct mpssas_softc *sassc = sc->sassc;
  275         path_id_t pathid;
  276         target_id_t targetid;
  277         union ccb *ccb;
  278 
  279         MPS_FUNCTRACE(sc);
  280         pathid = cam_sim_path(sassc->sim);
  281         if (targ == NULL)
  282                 targetid = CAM_TARGET_WILDCARD;
  283         else
  284                 targetid = targ - sassc->targets;
  285 
  286         /*
  287          * Allocate a CCB and schedule a rescan.
  288          */
  289         ccb = xpt_alloc_ccb_nowait();
  290         if (ccb == NULL) {
  291                 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
  292                 return;
  293         }
  294 
  295         if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
  296             targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
  297                 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
  298                 xpt_free_ccb(ccb);
  299                 return;
  300         }
  301 
  302         if (targetid == CAM_TARGET_WILDCARD)
  303                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
  304         else
  305                 ccb->ccb_h.func_code = XPT_SCAN_TGT;     
  306 
  307         mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
  308         xpt_rescan(ccb);
  309 }
  310 
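       /*
        * printf()-style logging helper: prefixes the message with the
        * command's CAM path (or a "noperiph" tuple) and its SMID, gated on
        * the mps_debug level.
        */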
  311 static void
  312 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
  313 {
  314         struct sbuf sb;
  315         va_list ap;
  316         char str[192];
  317         char path_str[64];
  318 
  319         if (cm == NULL)
  320                 return;
  321 
  322         /* No need to be in here if debugging isn't enabled */
  323         if ((cm->cm_sc->mps_debug & level) == 0)
  324                 return;
  325 
  326         sbuf_new(&sb, str, sizeof(str), 0);
  327 
  328         va_start(ap, fmt);
  329 
  330         if (cm->cm_ccb != NULL) {
  331                 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
  332                                 sizeof(path_str));
  333                 sbuf_cat(&sb, path_str);
  334                 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
  335                         scsi_command_string(&cm->cm_ccb->csio, &sb);
  336                         sbuf_printf(&sb, "length %d ",
  337                                     cm->cm_ccb->csio.dxfer_len);
  338                 }
  339         }
  340         else {
  341                 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
  342                     cam_sim_name(cm->cm_sc->sassc->sim),
  343                     cam_sim_unit(cm->cm_sc->sassc->sim),
  344                     cam_sim_bus(cm->cm_sc->sassc->sim),
  345                     cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
  346                     cm->cm_lun);
  347         }
  348 
  349         sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
  350         sbuf_vprintf(&sb, fmt, ap);
  351         sbuf_finish(&sb);
  352         mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
  353 
  354         va_end(ap);
  355 }
  356 
  357 
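       /*
        * Completion handler for the target-reset TM issued by
        * mpssas_prepare_volume_remove().  Volumes skip the
        * SAS_OP_REMOVE_DEVICE step, so the target bookkeeping is cleared
        * here once the reset succeeds.
        */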
  358 static void
  359 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
  360 {
  361         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
  362         struct mpssas_target *targ;
  363         uint16_t handle;
  364 
  365         MPS_FUNCTRACE(sc);
  366 
  367         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
  368         handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
  369         targ = tm->cm_targ;
  370 
  371         if (reply == NULL) {
  372                 /* XXX retry the remove after the diag reset completes? */
  373                 mps_dprint(sc, MPS_FAULT,
  374                     "%s NULL reply resetting device 0x%04x\n", __func__,
  375                     handle);
  376                 mpssas_free_tm(sc, tm);
  377                 return;
  378         }
  379 
  380         if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
  381             MPI2_IOCSTATUS_SUCCESS) {
  382                 mps_dprint(sc, MPS_ERROR,
  383                    "IOCStatus = 0x%x while resetting device 0x%x\n",
  384                    le16toh(reply->IOCStatus), handle);
  385         }
  386 
  387         mps_dprint(sc, MPS_XINFO,
   388             "Reset aborted %u commands\n", le32toh(reply->TerminationCount));
  389         mps_free_reply(sc, tm->cm_reply_data);
  390         tm->cm_reply = NULL;    /* Ensures the reply won't get re-freed */
  391 
  392         mps_dprint(sc, MPS_XINFO,
  393             "clearing target %u handle 0x%04x\n", targ->tid, handle);
  394         
  395         /*
  396          * Don't clear target if remove fails because things will get confusing.
  397          * Leave the devname and sasaddr intact so that we know to avoid reusing
  398          * this target id if possible, and so we can assign the same target id
  399          * to this device if it comes back in the future.
  400          */
  401         if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
  402             MPI2_IOCSTATUS_SUCCESS) {
  403                 targ = tm->cm_targ;
  404                 targ->handle = 0x0;
  405                 targ->encl_handle = 0x0;
  406                 targ->encl_slot = 0x0;
  407                 targ->exp_dev_handle = 0x0;
  408                 targ->phy_num = 0x0;
  409                 targ->linkrate = 0x0;
  410                 targ->devinfo = 0x0;
  411                 targ->flags = 0x0;
  412         }
  413 
  414         mpssas_free_tm(sc, tm);
  415 }
  416 
  417 
   418 /*
   419  * There is no need to send MPI2_SAS_OP_REMOVE_DEVICE for a volume removal.
   420  * Otherwise, deleting a volume is handled the same way as removing a bare drive.
   421  */
  422 void
  423 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
  424 {
  425         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
  426         struct mps_softc *sc;
  427         struct mps_command *cm;
  428         struct mpssas_target *targ = NULL;
  429 
  430         MPS_FUNCTRACE(sassc->sc);
  431         sc = sassc->sc;
  432 
  433 #ifdef WD_SUPPORT
  434         /*
  435          * If this is a WD controller, determine if the disk should be exposed
  436          * to the OS or not.  If disk should be exposed, return from this
  437          * function without doing anything.
  438          */
  439         if (sc->WD_available && (sc->WD_hide_expose ==
  440             MPS_WD_EXPOSE_ALWAYS)) {
  441                 return;
  442         }
  443 #endif //WD_SUPPORT
  444 
  445         targ = mpssas_find_target_by_handle(sassc, 0, handle);
  446         if (targ == NULL) {
  447                 /* FIXME: what is the action? */
  448                 /* We don't know about this device? */
  449                 mps_dprint(sc, MPS_ERROR,
  450                    "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
  451                 return;
  452         }
  453 
  454         targ->flags |= MPSSAS_TARGET_INREMOVAL;
  455 
  456         cm = mpssas_alloc_tm(sc);
  457         if (cm == NULL) {
  458                 mps_dprint(sc, MPS_ERROR,
  459                     "%s: command alloc failure\n", __func__);
  460                 return;
  461         }
  462 
  463         mpssas_rescan_target(sc, targ);
  464 
  465         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
   466         req->DevHandle = htole16(targ->handle);
  467         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
  468         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  469 
  470         /* SAS Hard Link Reset / SATA Link Reset */
  471         req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
  472 
  473         cm->cm_targ = targ;
  474         cm->cm_data = NULL;
  475         cm->cm_desc.HighPriority.RequestFlags =
  476             MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
  477         cm->cm_complete = mpssas_remove_volume;
  478         cm->cm_complete_data = (void *)(uintptr_t)handle;
  479 
  480         mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
  481             __func__, targ->tid);
  482         mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
  483 
  484         mps_map_command(sc, cm);
  485 }
  486 
  487 /*
  488  * The MPT2 firmware performs debounce on the link to avoid transient link
  489  * errors and false removals.  When it does decide that link has been lost
   490  * errors and false removals.  When it does decide that the link has been lost
   491  * and a device needs to go away, it expects that the host will perform a
  492  * aborting any outstanding requests for the device, which is required for
  493  * the op-remove to succeed.  It's not clear if the host should check for
  494  * the device coming back alive after the reset.
  495  */
  496 void
  497 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
  498 {
  499         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
  500         struct mps_softc *sc;
  501         struct mps_command *cm;
  502         struct mpssas_target *targ = NULL;
  503 
  504         MPS_FUNCTRACE(sassc->sc);
  505 
  506         sc = sassc->sc;
  507 
  508         targ = mpssas_find_target_by_handle(sassc, 0, handle);
  509         if (targ == NULL) {
  510                 /* FIXME: what is the action? */
  511                 /* We don't know about this device? */
  512                 mps_dprint(sc, MPS_ERROR,
  513                     "%s : invalid handle 0x%x \n", __func__, handle);
  514                 return;
  515         }
  516 
  517         targ->flags |= MPSSAS_TARGET_INREMOVAL;
  518 
  519         cm = mpssas_alloc_tm(sc);
  520         if (cm == NULL) {
  521                 mps_dprint(sc, MPS_ERROR,
  522                     "%s: command alloc failure\n", __func__);
  523                 return;
  524         }
  525 
  526         mpssas_rescan_target(sc, targ);
  527 
  528         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
  529         memset(req, 0, sizeof(*req));
  530         req->DevHandle = htole16(targ->handle);
  531         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
  532         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  533 
  534         /* SAS Hard Link Reset / SATA Link Reset */
  535         req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
  536 
  537         cm->cm_targ = targ;
  538         cm->cm_data = NULL;
  539         cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
  540         cm->cm_complete = mpssas_remove_device;
  541         cm->cm_complete_data = (void *)(uintptr_t)handle;
  542 
  543         mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
  544             __func__, targ->tid);
  545         mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
  546 
  547         mps_map_command(sc, cm);
  548 }
  549 
  550 static void
  551 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
  552 {
  553         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
  554         MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
  555         struct mpssas_target *targ;
  556         struct mps_command *next_cm;
  557         uint16_t handle;
  558 
  559         MPS_FUNCTRACE(sc);
  560 
  561         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
  562         handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
  563         targ = tm->cm_targ;
  564 
  565         /*
  566          * Currently there should be no way we can hit this case.  It only
  567          * happens when we have a failure to allocate chain frames, and
  568          * task management commands don't have S/G lists.
  569          */
  570         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
  571                 mps_dprint(sc, MPS_ERROR,
  572                     "%s: cm_flags = %#x for remove of handle %#04x! "
  573                     "This should not happen!\n", __func__, tm->cm_flags,
  574                     handle);
  575         }
  576 
  577         if (reply == NULL) {
  578                 /* XXX retry the remove after the diag reset completes? */
  579                 mps_dprint(sc, MPS_FAULT,
  580                     "%s NULL reply resetting device 0x%04x\n", __func__,
  581                     handle);
  582                 mpssas_free_tm(sc, tm);
  583                 return;
  584         }
  585 
  586         if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
  587             MPI2_IOCSTATUS_SUCCESS) {
  588                 mps_dprint(sc, MPS_ERROR,
  589                    "IOCStatus = 0x%x while resetting device 0x%x\n",
  590                    le16toh(reply->IOCStatus), handle);
  591         }
  592 
  593         mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
  594             le32toh(reply->TerminationCount));
  595         mps_free_reply(sc, tm->cm_reply_data);
  596         tm->cm_reply = NULL;    /* Ensures the reply won't get re-freed */
  597 
  598         /* Reuse the existing command */
  599         req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
  600         memset(req, 0, sizeof(*req));
  601         req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
  602         req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
  603         req->DevHandle = htole16(handle);
  604         tm->cm_data = NULL;
  605         tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
  606         tm->cm_complete = mpssas_remove_complete;
  607         tm->cm_complete_data = (void *)(uintptr_t)handle;
  608 
  609         mps_map_command(sc, tm);
  610 
  611         mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
  612                    targ->tid, handle);
  613         TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
  614                 union ccb *ccb;
  615 
  616                 mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
  617                 ccb = tm->cm_complete_data;
  618                 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
  619                 mpssas_scsiio_complete(sc, tm);
  620         }
  621 }
  622 
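       /*
        * Completion handler for the SAS_OP_REMOVE_DEVICE request.  On success
        * the target's handle and related fields are cleared and its LUN list
        * is freed; devname and sasaddr are deliberately left intact (see the
        * comment below).
        */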
  623 static void
  624 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
  625 {
  626         MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
  627         uint16_t handle;
  628         struct mpssas_target *targ;
  629         struct mpssas_lun *lun;
  630 
  631         MPS_FUNCTRACE(sc);
  632 
  633         reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
  634         handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
  635 
  636         /*
  637          * Currently there should be no way we can hit this case.  It only
  638          * happens when we have a failure to allocate chain frames, and
  639          * task management commands don't have S/G lists.
  640          */
  641         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
  642                 mps_dprint(sc, MPS_XINFO,
  643                            "%s: cm_flags = %#x for remove of handle %#04x! "
  644                            "This should not happen!\n", __func__, tm->cm_flags,
  645                            handle);
  646                 mpssas_free_tm(sc, tm);
  647                 return;
  648         }
  649 
  650         if (reply == NULL) {
  651                 /* most likely a chip reset */
  652                 mps_dprint(sc, MPS_FAULT,
  653                     "%s NULL reply removing device 0x%04x\n", __func__, handle);
  654                 mpssas_free_tm(sc, tm);
  655                 return;
  656         }
  657 
  658         mps_dprint(sc, MPS_XINFO,
  659             "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__, 
  660             handle, le16toh(reply->IOCStatus));
  661 
  662         /*
  663          * Don't clear target if remove fails because things will get confusing.
  664          * Leave the devname and sasaddr intact so that we know to avoid reusing
  665          * this target id if possible, and so we can assign the same target id
  666          * to this device if it comes back in the future.
  667          */
  668         if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
  669             MPI2_IOCSTATUS_SUCCESS) {
  670                 targ = tm->cm_targ;
  671                 targ->handle = 0x0;
  672                 targ->encl_handle = 0x0;
  673                 targ->encl_slot = 0x0;
  674                 targ->exp_dev_handle = 0x0;
  675                 targ->phy_num = 0x0;
  676                 targ->linkrate = 0x0;
  677                 targ->devinfo = 0x0;
  678                 targ->flags = 0x0;
  679                 
  680                 while(!SLIST_EMPTY(&targ->luns)) {
  681                         lun = SLIST_FIRST(&targ->luns);
  682                         SLIST_REMOVE_HEAD(&targ->luns, lun_link);
  683                         free(lun, M_MPT2);
  684                 }
  685         }
  686         
  687 
  688         mpssas_free_tm(sc, tm);
  689 }
  690 
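       /*
        * Build the firmware event mask (device status change, discovery,
        * topology, IR, enclosure and log events) and register
        * mpssas_evt_handler for those events.
        */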
  691 static int
  692 mpssas_register_events(struct mps_softc *sc)
  693 {
  694         u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
  695 
  696         bzero(events, 16);
  697         setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
  698         setbit(events, MPI2_EVENT_SAS_DISCOVERY);
  699         setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
  700         setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
  701         setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
  702         setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
  703         setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
  704         setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
  705         setbit(events, MPI2_EVENT_IR_VOLUME);
  706         setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
  707         setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
  708         setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
  709 
  710         mps_register_events(sc, events, mpssas_evt_handler, NULL,
  711             &sc->sassc->mpssas_eh);
  712 
  713         return (0);
  714 }
  715 
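       /*
        * Attach the CAM/SAS layer: allocate the SAS softc and target array,
        * create the SIM and devq, register the SCSI bus, start the
        * firmware-event taskqueue, and register the async callback used for
        * EEDP detection.
        */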
  716 int
  717 mps_attach_sas(struct mps_softc *sc)
  718 {
  719         struct mpssas_softc *sassc;
  720         cam_status status;
  721         int unit, error = 0, reqs;
  722 
  723         MPS_FUNCTRACE(sc);
  724         mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
  725 
  726         sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
  727         if(!sassc) {
  728                 mps_dprint(sc, MPS_INIT|MPS_ERROR,
  729                     "Cannot allocate SAS controller memory\n");
  730                 return (ENOMEM);
  731         }
  732 
  733         /*
  734          * XXX MaxTargets could change during a reinit.  Since we don't
  735          * resize the targets[] array during such an event, cache the value
  736          * of MaxTargets here so that we don't get into trouble later.  This
  737          * should move into the reinit logic.
  738          */
  739         sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
  740         sassc->targets = malloc(sizeof(struct mpssas_target) *
  741             sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
  742         if(!sassc->targets) {
  743                 mps_dprint(sc, MPS_INIT|MPS_ERROR,
  744                     "Cannot allocate SAS target memory\n");
  745                 free(sassc, M_MPT2);
  746                 return (ENOMEM);
  747         }
  748         sc->sassc = sassc;
  749         sassc->sc = sc;
  750 
  751         reqs = sc->num_reqs - sc->num_prireqs - 1;
  752         if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
  753                 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
  754                 error = ENOMEM;
  755                 goto out;
  756         }
  757 
  758         unit = device_get_unit(sc->mps_dev);
  759         sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
  760             unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
  761         if (sassc->sim == NULL) {
  762                 mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
  763                 error = EINVAL;
  764                 goto out;
  765         }
  766 
  767         TAILQ_INIT(&sassc->ev_queue);
  768 
  769         /* Initialize taskqueue for Event Handling */
  770         TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
  771         sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
  772             taskqueue_thread_enqueue, &sassc->ev_tq);
  773         taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq", 
  774             device_get_nameunit(sc->mps_dev));
  775 
  776         mps_lock(sc);
  777 
  778         /*
  779          * XXX There should be a bus for every port on the adapter, but since
  780          * we're just going to fake the topology for now, we'll pretend that
  781          * everything is just a target on a single bus.
  782          */
  783         if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
  784                 mps_dprint(sc, MPS_INIT|MPS_ERROR,
  785                     "Error %d registering SCSI bus\n", error);
  786                 mps_unlock(sc);
  787                 goto out;
  788         }
  789 
  790         /*
  791          * Assume that discovery events will start right away.
  792          *
  793          * Hold off boot until discovery is complete.
  794          */
  795         sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
  796         sc->sassc->startup_refcount = 0;
  797         mpssas_startup_increment(sassc);
  798 
  799         callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
  800 
  801         /*
  802          * Register for async events so we can determine the EEDP
  803          * capabilities of devices.
  804          */
  805         status = xpt_create_path(&sassc->path, /*periph*/NULL,
  806             cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
  807             CAM_LUN_WILDCARD);
  808         if (status != CAM_REQ_CMP) {
  809                 mps_dprint(sc, MPS_ERROR|MPS_INIT,
  810                     "Error %#x creating sim path\n", status);
  811                 sassc->path = NULL;
  812         } else {
  813                 int event;
  814 
  815 #if (__FreeBSD_version >= 1000006) || \
  816     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
  817                 event = AC_ADVINFO_CHANGED;
  818 #else
  819                 event = AC_FOUND_DEVICE;
  820 #endif
  821                 status = xpt_register_async(event, mpssas_async, sc,
  822                                             sassc->path);
  823                 if (status != CAM_REQ_CMP) {
  824                         mps_dprint(sc, MPS_ERROR,
  825                             "Error %#x registering async handler for "
  826                             "AC_ADVINFO_CHANGED events\n", status);
  827                         xpt_free_path(sassc->path);
  828                         sassc->path = NULL;
  829                 }
  830         }
  831         if (status != CAM_REQ_CMP) {
  832                 /*
  833                  * EEDP use is the exception, not the rule.
  834                  * Warn the user, but do not fail to attach.
  835                  */
  836                 mps_printf(sc, "EEDP capabilities disabled.\n");
  837         }
  838 
  839         mps_unlock(sc);
  840 
  841         mpssas_register_events(sc);
  842 out:
  843         if (error)
  844                 mps_detach_sas(sc);
  845 
  846         mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
  847         return (error);
  848 }
  849 
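       /*
        * Undo mps_attach_sas() in reverse order.  Safe to call on a partially
        * attached instance, so mps_attach_sas() uses it on its error path.
        */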
  850 int
  851 mps_detach_sas(struct mps_softc *sc)
  852 {
  853         struct mpssas_softc *sassc;
  854         struct mpssas_lun *lun, *lun_tmp;
  855         struct mpssas_target *targ;
  856         int i;
  857 
  858         MPS_FUNCTRACE(sc);
  859 
  860         if (sc->sassc == NULL)
  861                 return (0);
  862 
  863         sassc = sc->sassc;
  864         mps_deregister_events(sc, sassc->mpssas_eh);
  865 
  866         /*
  867          * Drain and free the event handling taskqueue with the lock
  868          * unheld so that any parallel processing tasks drain properly
  869          * without deadlocking.
  870          */
  871         if (sassc->ev_tq != NULL)
  872                 taskqueue_free(sassc->ev_tq);
  873 
  874         /* Make sure CAM doesn't wedge if we had to bail out early. */
  875         mps_lock(sc);
  876 
  877         while (sassc->startup_refcount != 0)
  878                 mpssas_startup_decrement(sassc);
  879 
  880         /* Deregister our async handler */
  881         if (sassc->path != NULL) {
  882                 xpt_register_async(0, mpssas_async, sc, sassc->path);
  883                 xpt_free_path(sassc->path);
  884                 sassc->path = NULL;
  885         }
  886 
  887         if (sassc->flags & MPSSAS_IN_STARTUP)
  888                 xpt_release_simq(sassc->sim, 1);
  889 
  890         if (sassc->sim != NULL) {
  891                 xpt_bus_deregister(cam_sim_path(sassc->sim));
  892                 cam_sim_free(sassc->sim, FALSE);
  893         }
  894 
  895         mps_unlock(sc);
  896 
  897         if (sassc->devq != NULL)
  898                 cam_simq_free(sassc->devq);
  899 
   900         for (i = 0; i < sassc->maxtargets; i++) {
  901                 targ = &sassc->targets[i];
  902                 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
  903                         free(lun, M_MPT2);
  904                 }
  905         }
  906         free(sassc->targets, M_MPT2);
  907         free(sassc, M_MPT2);
  908         sc->sassc = NULL;
  909 
  910         return (0);
  911 }
  912 
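       /*
        * Called when the firmware reports that discovery is complete: cancel
        * the discovery timeout and, if mapping events are tracked, schedule a
        * check of the mapping table for missing devices.
        */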
  913 void
  914 mpssas_discovery_end(struct mpssas_softc *sassc)
  915 {
  916         struct mps_softc *sc = sassc->sc;
  917 
  918         MPS_FUNCTRACE(sc);
  919 
  920         if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
  921                 callout_stop(&sassc->discovery_callout);
  922 
  923         /*
  924          * After discovery has completed, check the mapping table for any
  925          * missing devices and update their missing counts. Only do this once
  926          * whenever the driver is initialized so that missing counts aren't
  927          * updated unnecessarily. Note that just because discovery has
  928          * completed doesn't mean that events have been processed yet. The
  929          * check_devices function is a callout timer that checks if ALL devices
  930          * are missing. If so, it will wait a little longer for events to
  931          * complete and keep resetting itself until some device in the mapping
  932          * table is not missing, meaning that event processing has started.
  933          */
  934         if (sc->track_mapping_events) {
  935                 mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
  936                     "completed. Check for missing devices in the mapping "
  937                     "table.\n");
  938                 callout_reset(&sc->device_check_callout,
  939                     MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
  940                     sc);
  941         }
  942 }
  943 
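       /*
        * CAM action entry point for the SIM.  Simple CCBs are completed
        * inline; SCSI I/O, SMP I/O and device resets are handed to dedicated
        * handlers, which call xpt_done() themselves.
        */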
  944 static void
  945 mpssas_action(struct cam_sim *sim, union ccb *ccb)
  946 {
  947         struct mpssas_softc *sassc;
  948 
  949         sassc = cam_sim_softc(sim);
  950 
  951         MPS_FUNCTRACE(sassc->sc);
  952         mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
  953             ccb->ccb_h.func_code);
  954         mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
  955 
  956         switch (ccb->ccb_h.func_code) {
  957         case XPT_PATH_INQ:
  958         {
  959                 struct ccb_pathinq *cpi = &ccb->cpi;
  960                 struct mps_softc *sc = sassc->sc;
  961 
  962                 cpi->version_num = 1;
  963                 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
  964                 cpi->target_sprt = 0;
  965 #if __FreeBSD_version >= 1000039
  966                 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
  967 #else
  968                 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
  969 #endif
  970                 cpi->hba_eng_cnt = 0;
  971                 cpi->max_target = sassc->maxtargets - 1;
  972                 cpi->max_lun = 255;
  973 
  974                 /*
  975                  * initiator_id is set here to an ID outside the set of valid
  976                  * target IDs (including volumes).
  977                  */
  978                 cpi->initiator_id = sassc->maxtargets;
  979                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
  980                 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
  981                 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
  982                 cpi->unit_number = cam_sim_unit(sim);
  983                 cpi->bus_id = cam_sim_bus(sim);
  984                 cpi->base_transfer_speed = 150000;
  985                 cpi->transport = XPORT_SAS;
  986                 cpi->transport_version = 0;
  987                 cpi->protocol = PROTO_SCSI;
  988                 cpi->protocol_version = SCSI_REV_SPC;
  989                 cpi->maxio = sc->maxio;
  990                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
  991                 break;
  992         }
  993         case XPT_GET_TRAN_SETTINGS:
  994         {
  995                 struct ccb_trans_settings       *cts;
  996                 struct ccb_trans_settings_sas   *sas;
  997                 struct ccb_trans_settings_scsi  *scsi;
  998                 struct mpssas_target *targ;
  999 
 1000                 cts = &ccb->cts;
 1001                 sas = &cts->xport_specific.sas;
 1002                 scsi = &cts->proto_specific.scsi;
 1003 
 1004                 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
 1005                     ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
 1006                     cts->ccb_h.target_id));
 1007                 targ = &sassc->targets[cts->ccb_h.target_id];
 1008                 if (targ->handle == 0x0) {
 1009                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 1010                         break;
 1011                 }
 1012 
 1013                 cts->protocol_version = SCSI_REV_SPC2;
 1014                 cts->transport = XPORT_SAS;
 1015                 cts->transport_version = 0;
 1016 
 1017                 sas->valid = CTS_SAS_VALID_SPEED;
 1018                 switch (targ->linkrate) {
 1019                 case 0x08:
 1020                         sas->bitrate = 150000;
 1021                         break;
 1022                 case 0x09:
 1023                         sas->bitrate = 300000;
 1024                         break;
 1025                 case 0x0a:
 1026                         sas->bitrate = 600000;
 1027                         break;
 1028                 default:
 1029                         sas->valid = 0;
 1030                 }
 1031 
 1032                 cts->protocol = PROTO_SCSI;
 1033                 scsi->valid = CTS_SCSI_VALID_TQ;
 1034                 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
 1035 
 1036                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1037                 break;
 1038         }
 1039         case XPT_CALC_GEOMETRY:
 1040                 cam_calc_geometry(&ccb->ccg, /*extended*/1);
 1041                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1042                 break;
 1043         case XPT_RESET_DEV:
 1044                 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
 1045                 mpssas_action_resetdev(sassc, ccb);
 1046                 return;
 1047         case XPT_RESET_BUS:
 1048         case XPT_ABORT:
 1049         case XPT_TERM_IO:
 1050                 mps_dprint(sassc->sc, MPS_XINFO,
 1051                     "mpssas_action faking success for abort or reset\n");
 1052                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1053                 break;
 1054         case XPT_SCSI_IO:
 1055                 mpssas_action_scsiio(sassc, ccb);
 1056                 return;
 1057 #if __FreeBSD_version >= 900026
 1058         case XPT_SMP_IO:
 1059                 mpssas_action_smpio(sassc, ccb);
 1060                 return;
 1061 #endif
 1062         default:
 1063                 mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
 1064                 break;
 1065         }
 1066         xpt_done(ccb);
 1067 
 1068 }
 1069 
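       /*
        * Send an async notification (AC_BUS_RESET or AC_SENT_BDR) to CAM for
        * the given target and LUN so that attached peripherals can react to
        * the reset.
        */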
 1070 static void
 1071 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
 1072     target_id_t target_id, lun_id_t lun_id)
 1073 {
 1074         path_id_t path_id = cam_sim_path(sc->sassc->sim);
 1075         struct cam_path *path;
 1076 
 1077         mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
 1078             ac_code, target_id, (uintmax_t)lun_id);
 1079 
 1080         if (xpt_create_path(&path, NULL, 
 1081                 path_id, target_id, lun_id) != CAM_REQ_CMP) {
 1082                 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
 1083                            "notification\n");
 1084                 return;
 1085         }
 1086 
 1087         xpt_async(ac_code, path, NULL);
 1088         xpt_free_path(path);
 1089 }
 1090 
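       /*
        * After a diag reset the hardware no longer owns any requests, so
        * force completion of every active command with a NULL reply, either
        * by running its completion callback or by waking up its sleeping
        * owner.
        */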
 1091 static void 
 1092 mpssas_complete_all_commands(struct mps_softc *sc)
 1093 {
 1094         struct mps_command *cm;
 1095         int i;
 1096         int completed;
 1097 
 1098         MPS_FUNCTRACE(sc);
 1099         mtx_assert(&sc->mps_mtx, MA_OWNED);
 1100 
 1101         /* complete all commands with a NULL reply */
 1102         for (i = 1; i < sc->num_reqs; i++) {
 1103                 cm = &sc->commands[i];
 1104                 if (cm->cm_state == MPS_CM_STATE_FREE)
 1105                         continue;
 1106 
 1107                 cm->cm_state = MPS_CM_STATE_BUSY;
 1108                 cm->cm_reply = NULL;
 1109                 completed = 0;
 1110 
 1111                 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
 1112                         cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
 1113 
 1114                 if (cm->cm_complete != NULL) {
 1115                         mpssas_log_command(cm, MPS_RECOVERY,
 1116                             "completing cm %p state %x ccb %p for diag reset\n",
 1117                             cm, cm->cm_state, cm->cm_ccb);
 1118 
 1119                         cm->cm_complete(sc, cm);
 1120                         completed = 1;
 1121                 } else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
 1122                         mpssas_log_command(cm, MPS_RECOVERY,
 1123                             "waking up cm %p state %x ccb %p for diag reset\n", 
 1124                             cm, cm->cm_state, cm->cm_ccb);
 1125                         wakeup(cm);
 1126                         completed = 1;
 1127                 }
 1128 
 1129                 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
 1130                         /* this should never happen, but if it does, log */
 1131                         mpssas_log_command(cm, MPS_RECOVERY,
 1132                             "cm %p state %x flags 0x%x ccb %p during diag "
 1133                             "reset\n", cm, cm->cm_state, cm->cm_flags,
 1134                             cm->cm_ccb);
 1135                 }
 1136         }
 1137 
 1138         sc->io_cmds_active = 0;
 1139 }
 1140 
 1141 void
 1142 mpssas_handle_reinit(struct mps_softc *sc)
 1143 {
 1144         int i;
 1145 
 1146         /* Go back into startup mode and freeze the simq, so that CAM
 1147          * doesn't send any commands until after we've rediscovered all
 1148          * targets and found the proper device handles for them.
 1149          *
 1150          * After the reset, portenable will trigger discovery, and after all
 1151          * discovery-related activities have finished, the simq will be
 1152          * released.
 1153          */
 1154         mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
 1155         sc->sassc->flags |= MPSSAS_IN_STARTUP;
 1156         sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
 1157         mpssas_startup_increment(sc->sassc);
 1158 
 1159         /* notify CAM of a bus reset */
 1160         mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, 
 1161             CAM_LUN_WILDCARD);
 1162 
 1163         /* complete and cleanup after all outstanding commands */
 1164         mpssas_complete_all_commands(sc);
 1165 
 1166         mps_dprint(sc, MPS_INIT,
 1167             "%s startup %u after command completion\n", __func__,
 1168             sc->sassc->startup_refcount);
 1169 
 1170         /* zero all the target handles, since they may change after the
 1171          * reset, and we have to rediscover all the targets and use the new
 1172          * handles.  
 1173          */
 1174         for (i = 0; i < sc->sassc->maxtargets; i++) {
 1175                 if (sc->sassc->targets[i].outstanding != 0)
 1176                         mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n", 
 1177                             i, sc->sassc->targets[i].outstanding);
 1178                 sc->sassc->targets[i].handle = 0x0;
 1179                 sc->sassc->targets[i].exp_dev_handle = 0x0;
 1180                 sc->sassc->targets[i].outstanding = 0;
 1181                 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
 1182         }
 1183 }
 1184 
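       /*
        * Callout handler fired when a task-management request itself times
        * out; at that point the only remaining recovery step is a controller
        * reinit.
        */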
 1185 static void
 1186 mpssas_tm_timeout(void *data)
 1187 {
 1188         struct mps_command *tm = data;
 1189         struct mps_softc *sc = tm->cm_sc;
 1190 
 1191         mtx_assert(&sc->mps_mtx, MA_OWNED);
 1192 
 1193         mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
 1194             "task mgmt %p timed out\n", tm);
 1195 
 1196         KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
 1197             ("command not inqueue\n"));
 1198 
 1199         tm->cm_state = MPS_CM_STATE_BUSY;
 1200         mps_reinit(sc);
 1201 }
 1202 
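       /*
        * Completion handler for a LOGICAL_UNIT_RESET TM.  If commands are
        * still outstanding on the LUN, the reset is treated as failed and
        * recovery escalates to a target reset.
        */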
 1203 static void
 1204 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
 1205 {
 1206         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
 1207         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1208         unsigned int cm_count = 0;
 1209         struct mps_command *cm;
 1210         struct mpssas_target *targ;
 1211 
 1212         callout_stop(&tm->cm_callout);
 1213 
 1214         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1215         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 1216         targ = tm->cm_targ;
 1217 
 1218         /*
 1219          * Currently there should be no way we can hit this case.  It only
 1220          * happens when we have a failure to allocate chain frames, and
 1221          * task management commands don't have S/G lists.
 1222          * XXXSL So should it be an assertion?
 1223          */
 1224         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 1225                 mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
 1226                     "%s: cm_flags = %#x for LUN reset! "
 1227                    "This should not happen!\n", __func__, tm->cm_flags);
 1228                 mpssas_free_tm(sc, tm);
 1229                 return;
 1230         }
 1231 
 1232         if (reply == NULL) {
 1233                 mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
 1234                     tm);
 1235                 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
 1236                         /* this completion was due to a reset, just cleanup */
 1237                         mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
 1238                             "reset, ignoring NULL LUN reset reply\n");
 1239                         targ->tm = NULL;
 1240                         mpssas_free_tm(sc, tm);
 1241                 }
 1242                 else {
 1243                         /* we should have gotten a reply. */
 1244                         mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
 1245                             "LUN reset attempt, resetting controller\n");
 1246                         mps_reinit(sc);
 1247                 }
 1248                 return;
 1249         }
 1250 
 1251         mps_dprint(sc, MPS_RECOVERY,
 1252             "logical unit reset status 0x%x code 0x%x count %u\n",
 1253             le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
 1254             le32toh(reply->TerminationCount));
 1255                 
 1256         /*
 1257          * See if there are any outstanding commands for this LUN.
 1258          * This could be made more efficient by using a per-LU data
 1259          * structure of some sort.
 1260          */
 1261         TAILQ_FOREACH(cm, &targ->commands, cm_link) {
 1262                 if (cm->cm_lun == tm->cm_lun)
 1263                         cm_count++;
 1264         }
 1265 
 1266         if (cm_count == 0) {
 1267                 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
 1268                     "Finished recovery after LUN reset for target %u\n",
 1269                     targ->tid);
 1270 
 1271                 mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);
 1272 
 1273                 /*
  1274                  * We've finished recovery for this logical unit.  Check and
  1275                  * see if some other logical unit has a timed-out command
 1276                  * that needs to be processed.
 1277                  */
 1278                 cm = TAILQ_FIRST(&targ->timedout_commands);
 1279                 if (cm) {
 1280                         mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
 1281                             "More commands to abort for target %u\n",
 1282                             targ->tid);
 1283                         mpssas_send_abort(sc, tm, cm);
 1284                 } else {
 1285                         targ->tm = NULL;
 1286                         mpssas_free_tm(sc, tm);
 1287                 }
 1288         } else {
 1289                 /*
 1290                  * If we still have commands for this LUN, the reset
 1291                  * effectively failed, regardless of the status reported.
 1292                  * Escalate to a target reset.
 1293                  */
 1294                 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
 1295                     "logical unit reset complete for target %u, but still "
 1296                     "have %u command(s), sending target reset\n", targ->tid,
 1297                     cm_count);
 1298                 mpssas_send_reset(sc, tm,
 1299                     MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
 1300         }
 1301 }
 1302 
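       /*
        * Completion handler for a TARGET_RESET TM.  If the target still has
        * outstanding commands, the reset is treated as failed and recovery
        * escalates to a controller reinit.
        */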
 1303 static void
 1304 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
 1305 {
 1306         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
 1307         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1308         struct mpssas_target *targ;
 1309 
 1310         callout_stop(&tm->cm_callout);
 1311 
 1312         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1313         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 1314         targ = tm->cm_targ;
 1315 
 1316         /*
 1317          * Currently there should be no way we can hit this case.  It only
 1318          * happens when we have a failure to allocate chain frames, and
 1319          * task management commands don't have S/G lists.
 1320          */
 1321         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 1322                 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
 1323                            "This should not happen!\n", __func__, tm->cm_flags);
 1324                 mpssas_free_tm(sc, tm);
 1325                 return;
 1326         }
 1327 
 1328         if (reply == NULL) {
 1329                 mps_dprint(sc, MPS_RECOVERY,
 1330             "NULL target reset reply for tm %p TaskMID %u\n",
 1331                     tm, le16toh(req->TaskMID));
 1332                 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
 1333                         /* this completion was due to a reset, just cleanup */
 1334                         mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
 1335                             "reset, ignoring NULL target reset reply\n");
 1336                         targ->tm = NULL;
 1337                         mpssas_free_tm(sc, tm);
 1338                 } else {
 1339                         /* we should have gotten a reply. */
 1340                         mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
 1341                             "target reset attempt, resetting controller\n");
 1342                         mps_reinit(sc);
 1343                 }
 1344                 return;
 1345         }
 1346 
 1347         mps_dprint(sc, MPS_RECOVERY,
 1348             "target reset status 0x%x code 0x%x count %u\n",
 1349             le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
 1350             le32toh(reply->TerminationCount));
 1351 
 1352         if (targ->outstanding == 0) {
 1353                 /* we've finished recovery for this target and all
 1354                  * of its logical units.
 1355                  */
 1356                 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
 1357                     "Finished reset recovery for target %u\n", targ->tid);
 1358 
 1359                 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
 1360                     CAM_LUN_WILDCARD);
 1361 
 1362                 targ->tm = NULL;
 1363                 mpssas_free_tm(sc, tm);
 1364         } else {
 1365                 /*
 1366                  * After a target reset, if this target still has
 1367                  * outstanding commands, the reset effectively failed,
 1368                  * regardless of the status reported.  Escalate to a controller reset.
 1369                  */
 1370                 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
 1371                     "Target reset complete for target %u, but still have %u "
 1372                     "command(s), resetting controller\n", targ->tid,
 1373                     targ->outstanding);
 1374                 mps_reinit(sc);
 1375         }
 1376 }
 1377 
 1378 #define MPS_RESET_TIMEOUT 30
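/*
 * Note: this timeout (and MPS_ABORT_TIMEOUT below) is in seconds; the
 * senders multiply by hz because callout_reset(9) takes its timeout in
 * ticks.
 */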
 1379 
 1380 int
 1381 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
 1382 {
 1383         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1384         struct mpssas_target *target;
 1385         int err;
 1386 
 1387         target = tm->cm_targ;
 1388         if (target->handle == 0) {
 1389                 mps_dprint(sc, MPS_ERROR, "%s null devhandle for target_id %d\n",
 1390                     __func__, target->tid);
 1391                 return -1;
 1392         }
 1393 
 1394         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1395         req->DevHandle = htole16(target->handle);
 1396         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 1397         req->TaskType = type;
 1398 
 1399         if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
 1400                 /* XXX Need to handle invalid LUNs */
 1401                 MPS_SET_LUN(req->LUN, tm->cm_lun);
 1402                 tm->cm_targ->logical_unit_resets++;
 1403                 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
 1404                     "Sending logical unit reset to target %u lun %d\n",
 1405                     target->tid, tm->cm_lun);
 1406                 tm->cm_complete = mpssas_logical_unit_reset_complete;
 1407                 mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
 1408         } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
 1409                 /*
 1410                  * Target reset method =
 1411                  *      SAS Hard Link Reset / SATA Link Reset
 1412                  */
 1413                 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
 1414                 tm->cm_targ->target_resets++;
 1415                 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
 1416                     "Sending target reset to target %u\n", target->tid);
 1417                 tm->cm_complete = mpssas_target_reset_complete;
 1418                 mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
 1419         } else {
 1420                 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
 1421                 return -1;
 1422         }
 1423 
 1424         tm->cm_data = NULL;
 1425         tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
 1426         tm->cm_complete_data = (void *)tm;
 1427 
 1428         callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
 1429             mpssas_tm_timeout, tm);
 1430 
 1431         err = mps_map_command(sc, tm);
 1432         if (err)
 1433                 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
 1434                     "error %d sending reset type %u\n",
 1435                     err, type);
 1436 
 1437         return err;
 1438 }
 1439 
 1440 
 1441 static void
 1442 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
 1443 {
 1444         struct mps_command *cm;
 1445         MPI2_SCSI_TASK_MANAGE_REPLY *reply;
 1446         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1447         struct mpssas_target *targ;
 1448 
 1449         callout_stop(&tm->cm_callout);
 1450 
 1451         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1452         reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 1453         targ = tm->cm_targ;
 1454 
 1455         /*
 1456          * Currently there should be no way we can hit this case.  It only
 1457          * happens when we have a failure to allocate chain frames, and
 1458          * task management commands don't have S/G lists.
 1459          */
 1460         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 1461                 mps_dprint(sc, MPS_RECOVERY,
 1462                     "cm_flags = %#x for abort %p TaskMID %u!\n", 
 1463                     tm->cm_flags, tm, le16toh(req->TaskMID));
 1464                 mpssas_free_tm(sc, tm);
 1465                 return;
 1466         }
 1467 
 1468         if (reply == NULL) {
 1469                 mps_dprint(sc, MPS_RECOVERY,
 1470                     "NULL abort reply for tm %p TaskMID %u\n", 
 1471                     tm, le16toh(req->TaskMID));
 1472                 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
 1473                         /* this completion was due to a reset, just cleanup */
 1474                         mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
 1475                             "reset, ignoring NULL abort reply\n");
 1476                         targ->tm = NULL;
 1477                         mpssas_free_tm(sc, tm);
 1478                 } else {
 1479                         /* we should have gotten a reply. */
 1480                         mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
 1481                             "abort attempt, resetting controller\n");
 1482                         mps_reinit(sc);
 1483                 }
 1484                 return;
 1485         }
 1486 
 1487         mps_dprint(sc, MPS_RECOVERY,
 1488             "abort TaskMID %u status 0x%x code 0x%x count %u\n",
 1489             le16toh(req->TaskMID),
 1490             le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
 1491             le32toh(reply->TerminationCount));
 1492 
 1493         cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
 1494         if (cm == NULL) {
 1495                 /*
 1496                  * If there are no more timedout commands, we're done with
 1497                  * error recovery for this target.
 1498                  */
 1499                 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
 1500                     "Finished abort recovery for target %u\n", targ->tid);
 1501 
 1502                 targ->tm = NULL;
 1503                 mpssas_free_tm(sc, tm);
 1504         } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
 1505                 /* abort success, but we have more timedout commands to abort */
 1506                 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
 1507                     "Continuing abort recovery for target %u\n", targ->tid);
 1508                 
 1509                 mpssas_send_abort(sc, tm, cm);
 1510         } else {
 1511                 /* we didn't get a command completion, so the abort
 1512                  * failed as far as we're concerned.  escalate.
 1513                  */
 1514                 mps_dprint(sc, MPS_RECOVERY,
 1515                     "Abort failed for target %u, sending logical unit reset\n",
 1516                     targ->tid);
 1517 
 1518                 mpssas_send_reset(sc, tm, 
 1519                     MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
 1520         }
 1521 }
 1522 
 1523 #define MPS_ABORT_TIMEOUT 5
 1524 
 1525 static int
 1526 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
 1527 {
 1528         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 1529         struct mpssas_target *targ;
 1530         int err;
 1531 
 1532         targ = cm->cm_targ;
 1533         if (targ->handle == 0) {
 1534                 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
 1535                     "%s null devhandle for target_id %d\n",
 1536                     __func__, cm->cm_ccb->ccb_h.target_id);
 1537                 return -1;
 1538         }
 1539 
 1540         mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
 1541             "Aborting command %p\n", cm);
 1542 
 1543         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 1544         req->DevHandle = htole16(targ->handle);
 1545         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 1546         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
 1547 
 1548         /* XXX Need to handle invalid LUNs */
 1549         MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
 1550 
 1551         req->TaskMID = htole16(cm->cm_desc.Default.SMID);
 1552 
 1553         tm->cm_data = NULL;
 1554         tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
 1555         tm->cm_complete = mpssas_abort_complete;
 1556         tm->cm_complete_data = (void *)tm;
 1557         tm->cm_targ = cm->cm_targ;
 1558         tm->cm_lun = cm->cm_lun;
 1559 
 1560         callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
 1561             mpssas_tm_timeout, tm);
 1562 
 1563         targ->aborts++;
 1564 
 1565         mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
 1566 
 1567         err = mps_map_command(sc, tm);
 1568         if (err)
 1569                 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
 1570                     "error %d sending abort for cm %p SMID %u\n",
 1571                     err, cm, le16toh(req->TaskMID));
 1572         return err;
 1573 }
 1574 
 1575 static void
 1576 mpssas_scsiio_timeout(void *data)
 1577 {
 1578         sbintime_t elapsed, now;
 1579         union ccb *ccb;
 1580         struct mps_softc *sc;
 1581         struct mps_command *cm;
 1582         struct mpssas_target *targ;
 1583 
 1584         cm = (struct mps_command *)data;
 1585         sc = cm->cm_sc;
 1586         ccb = cm->cm_ccb;
 1587         now = sbinuptime();
 1588 
 1589         MPS_FUNCTRACE(sc);
 1590         mtx_assert(&sc->mps_mtx, MA_OWNED);
 1591 
 1592         mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", cm);
 1593 
 1594         /*
 1595          * Run the interrupt handler to make sure it's not pending.  This
 1596          * isn't perfect because the command could have already completed
 1597          * and been re-used, though this is unlikely.
 1598          */
 1599         mps_intr_locked(sc);
 1600         if (cm->cm_state != MPS_CM_STATE_INQUEUE) {
 1601                 mpssas_log_command(cm, MPS_XINFO,
 1602                     "SCSI command %p almost timed out\n", cm);
 1603                 return;
 1604         }
 1605 
 1606         if (cm->cm_ccb == NULL) {
 1607                 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
 1608                 return;
 1609         }
 1610 
 1611         targ = cm->cm_targ;
 1612         targ->timeouts++;
 1613 
 1614         elapsed = now - ccb->ccb_h.qos.sim_data;
 1615         mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
 1616             "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
 1617             targ->tid, targ->handle, ccb->ccb_h.timeout,
 1618             sbintime_getsec(elapsed), elapsed & 0xffffffff);
 1619 
 1620         /* XXX First, check the firmware state to see if it's still
 1621          * operational.  If not, do a diag reset.
 1622          */
 1623         mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
 1624         cm->cm_state = MPS_CM_STATE_TIMEDOUT;
 1625         TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
 1626 
 1627         if (targ->tm != NULL) {
 1628                 /* target already in recovery, just queue up another
 1629                  * timedout command to be processed later.
 1630                  */
 1631                 mps_dprint(sc, MPS_RECOVERY,
 1632                     "queued timedout cm %p for processing by tm %p\n",
 1633                     cm, targ->tm);
 1634         } else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
 1635                 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
 1636                     "Sending abort to target %u for SMID %d\n", targ->tid,
 1637                     cm->cm_desc.Default.SMID);
 1638                 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
 1639                     cm, targ->tm);
 1640 
 1641                 /* start recovery by aborting the first timedout command */
 1642                 mpssas_send_abort(sc, targ->tm, cm);
 1643         } else {
 1644                 /* XXX queue this target up for recovery once a TM becomes
 1645                  * available.  The firmware only has a limited number of
 1646                  * HighPriority credits for the high priority requests used
 1647                  * for task management, and we ran out.
 1648                  * 
 1649                  * Isilon: don't worry about this for now, since we have
 1650                  * more credits than disks in an enclosure, and limit
 1651                  * ourselves to one TM per target for recovery.
 1652                  */
 1653                 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
 1654                     "timedout cm %p failed to allocate a tm\n", cm);
 1655         }
 1656 
 1657 }
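/*
 * Note: the timeout handler above only acts on commands still in
 * MPS_CM_STATE_INQUEUE; it marks them MPS_CM_STATE_TIMEDOUT and queues them
 * on targ->timedout_commands.  mpssas_scsiio_complete() takes such a command
 * off the timed-out list and marks it MPS_CM_STATE_BUSY if its completion
 * eventually arrives during recovery.
 */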
 1658 
 1659 static void
 1660 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
 1661 {
 1662         MPI2_SCSI_IO_REQUEST *req;
 1663         struct ccb_scsiio *csio;
 1664         struct mps_softc *sc;
 1665         struct mpssas_target *targ;
 1666         struct mpssas_lun *lun;
 1667         struct mps_command *cm;
 1668         uint8_t i, lba_byte, *ref_tag_addr;
 1669         uint16_t eedp_flags;
 1670         uint32_t mpi_control;
 1671 
 1672         sc = sassc->sc;
 1673         MPS_FUNCTRACE(sc);
 1674         mtx_assert(&sc->mps_mtx, MA_OWNED);
 1675 
 1676         csio = &ccb->csio;
 1677         KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
 1678             ("Target %d out of bounds in XPT_SCSI_IO\n",
 1679              csio->ccb_h.target_id));
 1680         targ = &sassc->targets[csio->ccb_h.target_id];
 1681         mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
 1682         if (targ->handle == 0x0) {
 1683                 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n", 
 1684                     __func__, csio->ccb_h.target_id);
 1685                 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 1686                 xpt_done(ccb);
 1687                 return;
 1688         }
 1689         if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
 1690                 mps_dprint(sc, MPS_ERROR, "%s RAID component target %u does "
 1691                     "not support SCSI I/O\n", __func__, csio->ccb_h.target_id);
 1692                 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 1693                 xpt_done(ccb);
 1694                 return;
 1695         }
 1696         /*
 1697          * Sometimes, it is possible to get a command that is not "In
 1698          * Progress" and was actually aborted by the upper layer.  Check for
 1699          * this here and complete the command without error.
 1700          */
 1701         if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
 1702                 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
 1703                     "target %u\n", __func__, csio->ccb_h.target_id);
 1704                 xpt_done(ccb);
 1705                 return;
 1706         }
 1707         /*
 1708          * If devinfo is 0 this will be a volume.  In that case don't tell CAM
 1709          * that the volume has timed out.  We want volumes to be enumerated
 1710          * until they are deleted/removed, not just failed.
 1711          */
 1712         if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
 1713                 if (targ->devinfo == 0)
 1714                         mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 1715                 else
 1716                         mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
 1717                 xpt_done(ccb);
 1718                 return;
 1719         }
 1720 
 1721         if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
 1722                 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
 1723                 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 1724                 xpt_done(ccb);
 1725                 return;
 1726         }
 1727 
 1728         /*
 1729          * If target has a reset in progress, freeze the devq and return.  The
 1730          * devq will be released when the TM reset is finished.
 1731          */
 1732         if (targ->flags & MPSSAS_TARGET_INRESET) {
 1733                 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
 1734                 mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
 1735                     __func__, targ->tid);
 1736                 xpt_freeze_devq(ccb->ccb_h.path, 1);
 1737                 xpt_done(ccb);
 1738                 return;
 1739         }
 1740 
 1741         cm = mps_alloc_command(sc);
 1742         if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
 1743                 if (cm != NULL) {
 1744                         mps_free_command(sc, cm);
 1745                 }
 1746                 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
 1747                         xpt_freeze_simq(sassc->sim, 1);
 1748                         sassc->flags |= MPSSAS_QUEUE_FROZEN;
 1749                 }
 1750                 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 1751                 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
 1752                 xpt_done(ccb);
 1753                 return;
 1754         }
 1755 
 1756         req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
 1757         bzero(req, sizeof(*req));
 1758         req->DevHandle = htole16(targ->handle);
 1759         req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
 1760         req->MsgFlags = 0;
 1761         req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
 1762         req->SenseBufferLength = MPS_SENSE_LEN;
 1763         req->SGLFlags = 0;
 1764         req->ChainOffset = 0;
 1765         req->SGLOffset0 = 24;   /* 32bit word offset to the SGL */
 1766         req->SGLOffset1 = 0;
 1767         req->SGLOffset2 = 0;
 1768         req->SGLOffset3 = 0;
 1769         req->SkipCount = 0;
 1770         req->DataLength = htole32(csio->dxfer_len);
 1771         req->BidirectionalDataLength = 0;
 1772         req->IoFlags = htole16(csio->cdb_len);
 1773         req->EEDPFlags = 0;
 1774 
 1775         /* Note: BiDirectional transfers are not supported */
 1776         switch (csio->ccb_h.flags & CAM_DIR_MASK) {
 1777         case CAM_DIR_IN:
 1778                 mpi_control = MPI2_SCSIIO_CONTROL_READ;
 1779                 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
 1780                 break;
 1781         case CAM_DIR_OUT:
 1782                 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
 1783                 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
 1784                 break;
 1785         case CAM_DIR_NONE:
 1786         default:
 1787                 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
 1788                 break;
 1789         }
 1790  
 1791         if (csio->cdb_len == 32)
 1792                 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
 1793         /*
 1794          * It looks like the hardware doesn't require an explicit tag
 1795          * number for each transaction.  SAM Task Management not supported
 1796          * at the moment.
 1797          */
 1798         switch (csio->tag_action) {
 1799         case MSG_HEAD_OF_Q_TAG:
 1800                 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
 1801                 break;
 1802         case MSG_ORDERED_Q_TAG:
 1803                 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
 1804                 break;
 1805         case MSG_ACA_TASK:
 1806                 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
 1807                 break;
 1808         case CAM_TAG_ACTION_NONE:
 1809         case MSG_SIMPLE_Q_TAG:
 1810         default:
 1811                 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
 1812                 break;
 1813         }
 1814         mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
 1815         req->Control = htole32(mpi_control);
 1816         if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
 1817                 mps_free_command(sc, cm);
 1818                 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
 1819                 xpt_done(ccb);
 1820                 return;
 1821         }
 1822 
 1823         if (csio->ccb_h.flags & CAM_CDB_POINTER)
 1824                 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
 1825         else
 1826                 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
 1827         req->IoFlags = htole16(csio->cdb_len);
 1828 
 1829         /*
 1830          * Check if EEDP is supported and enabled.  If it is then check if the
 1831          * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
 1832          * is formatted for EEDP support.  If all of this is true, set CDB up
 1833          * for EEDP transfer.
 1834          */
 1835         eedp_flags = op_code_prot[req->CDB.CDB32[0]];
 1836         if (sc->eedp_enabled && eedp_flags) {
 1837                 SLIST_FOREACH(lun, &targ->luns, lun_link) {
 1838                         if (lun->lun_id == csio->ccb_h.target_lun) {
 1839                                 break;
 1840                         }
 1841                 }
 1842 
 1843                 if ((lun != NULL) && (lun->eedp_formatted)) {
 1844                         req->EEDPBlockSize = htole16(lun->eedp_block_size);
 1845                         eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
 1846                             MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
 1847                             MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
 1848                         req->EEDPFlags = htole16(eedp_flags);
 1849 
 1850                         /*
 1851                          * If the CDB is less than 32 bytes, fill in the Primary
 1852                          * Ref Tag with the low 4 bytes of the LBA.  If the CDB is
 1853                          * 32 bytes, the tag fields are already there.  Also, set
 1854                          * the protection bit.  FreeBSD currently does not support
 1855                          * CDBs bigger than 16 bytes, but the code is harmless and
 1856                          * will be here for the future.
 1857                          */
 1858                         if (csio->cdb_len != 32) {
 1859                                 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
 1860                                 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
 1861                                     PrimaryReferenceTag;
 1862                                 for (i = 0; i < 4; i++) {
 1863                                         *ref_tag_addr =
 1864                                             req->CDB.CDB32[lba_byte + i];
 1865                                         ref_tag_addr++;
 1866                                 }
 1867                                 req->CDB.EEDP32.PrimaryReferenceTag = 
 1868                                         htole32(req->CDB.EEDP32.PrimaryReferenceTag);
 1869                                 req->CDB.EEDP32.PrimaryApplicationTagMask =
 1870                                     0xFFFF;
 1871                                 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
 1872                                     0x20;
 1873                         } else {
 1874                                 eedp_flags |=
 1875                                     MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
 1876                                 req->EEDPFlags = htole16(eedp_flags);
 1877                                 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
 1878                                     0x1F) | 0x20;
 1879                         }
 1880                 }
 1881         }
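        /*
         * Note on the EEDP setup above: lba_byte points at the low 4 bytes
         * of the LBA within the CDB, so for a 10-byte READ/WRITE the loop
         * copies CDB32[2..5] and for a 16-byte CDB it copies CDB32[6..9]
         * (the low half of the 8-byte LBA) into PrimaryReferenceTag.
         */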
 1882 
 1883         cm->cm_length = csio->dxfer_len;
 1884         if (cm->cm_length != 0) {
 1885                 cm->cm_data = ccb;
 1886                 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
 1887         } else {
 1888                 cm->cm_data = NULL;
 1889         }
 1890         cm->cm_sge = &req->SGL;
 1891         cm->cm_sglsize = (32 - 24) * 4;
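        /*
         * The embedded SGL starts at 32-bit word 24 of the request frame
         * (SGLOffset0 above), so (32 - 24) * 4 = 32 bytes of S/G space are
         * available in the frame itself (assuming a 32-word, i.e. 128-byte,
         * request frame); anything larger is carried in chain frames.
         */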
 1892         cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
 1893         cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
 1894         cm->cm_complete = mpssas_scsiio_complete;
 1895         cm->cm_complete_data = ccb;
 1896         cm->cm_targ = targ;
 1897         cm->cm_lun = csio->ccb_h.target_lun;
 1898         cm->cm_ccb = ccb;
 1899 
 1900         /*
 1901          * If HBA is a WD and the command is not for a retry, try to build a
 1902          * direct I/O message. If failed, or the command is for a retry, send
 1903          * the I/O to the IR volume itself.
 1904          */
 1905         if (sc->WD_valid_config) {
 1906                 if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
 1907                         mpssas_direct_drive_io(sassc, cm, ccb);
 1908                 } else {
 1909                         mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
 1910                 }
 1911         }
 1912 
 1913 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
 1914         if (csio->bio != NULL)
 1915                 biotrack(csio->bio, __func__);
 1916 #endif
 1917         csio->ccb_h.qos.sim_data = sbinuptime();
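        /*
         * ccb_h.timeout is in milliseconds, so SBT_1MS scales it to the
         * sbintime_t units callout_reset_sbt() expects; sim_data records
         * the submit time so mpssas_scsiio_timeout() can report how long
         * the command was actually outstanding.
         */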
 1918         callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
 1919             mpssas_scsiio_timeout, cm, 0);
 1920 
 1921         targ->issued++;
 1922         targ->outstanding++;
 1923         TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
 1924         ccb->ccb_h.status |= CAM_SIM_QUEUED;
 1925 
 1926         mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
 1927             __func__, cm, ccb, targ->outstanding);
 1928 
 1929         mps_map_command(sc, cm);
 1930         return;
 1931 }
 1932 
 1933 /**
 1934  * mps_sc_failed_io_info - translate and log a non-successful SCSI_IO request
 1935  */
 1936 static void
 1937 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
 1938     Mpi2SCSIIOReply_t *mpi_reply)
 1939 {
 1940         u32 response_info;
 1941         u8 *response_bytes;
 1942         u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
 1943             MPI2_IOCSTATUS_MASK;
 1944         u8 scsi_state = mpi_reply->SCSIState;
 1945         u8 scsi_status = mpi_reply->SCSIStatus;
 1946         u32 log_info = le32toh(mpi_reply->IOCLogInfo);
 1947         const char *desc_ioc_state, *desc_scsi_status;
 1948         
 1949         if (log_info == 0x31170000)
 1950                 return;
 1951 
 1952         desc_ioc_state = mps_describe_table(mps_iocstatus_string,
 1953             ioc_status);
 1954         desc_scsi_status = mps_describe_table(mps_scsi_status_string,
 1955             scsi_status);
 1956 
 1957         mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
 1958             le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
 1959 
 1960         /*
 1961          * TODO
 1962          * We can add more detail about underflow data here.
 1963          */
 1964         mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
 1965             "scsi_state %b\n", desc_scsi_status, scsi_status,
 1966             scsi_state, "\2" "\1AutosenseValid" "\2AutosenseFailed"
 1967             "\3NoScsiStatus" "\4Terminated" "\5ResponseInfoValid");
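        /*
         * scsi_state is decoded with the kernel printf(9) "%b" format:
         * the leading "\2" selects the numeric base and each "\<n>Name"
         * entry labels bit n, so a state with the autosense-valid and
         * no-SCSI-status bits set would print roughly as
         * "101<AutosenseValid,NoScsiStatus>".
         */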
 1968 
 1969         if ((sc->mps_debug & MPS_XINFO) &&
 1970             (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)) {
 1971                 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
 1972                 scsi_sense_print(csio);
 1973                 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
 1974         }
 1975 
 1976         if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
 1977                 response_info = le32toh(mpi_reply->ResponseInfo);
 1978                 response_bytes = (u8 *)&response_info;
 1979                 mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
 1980                     response_bytes[0],
 1981                     mps_describe_table(mps_scsi_taskmgmt_string,
 1982                     response_bytes[0]));
 1983         }
 1984 }
 1985 
 1986 static void
 1987 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
 1988 {
 1989         MPI2_SCSI_IO_REPLY *rep;
 1990         union ccb *ccb;
 1991         struct ccb_scsiio *csio;
 1992         struct mpssas_softc *sassc;
 1993         struct scsi_vpd_supported_page_list *vpd_list = NULL;
 1994         u8 *TLR_bits, TLR_on;
 1995         int dir = 0, i;
 1996         u16 alloc_len;
 1997         struct mpssas_target *target;
 1998         target_id_t target_id;
 1999 
 2000         MPS_FUNCTRACE(sc);
 2001         mps_dprint(sc, MPS_TRACE,
 2002             "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
 2003             cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
 2004             cm->cm_targ->outstanding);
 2005 
 2006         callout_stop(&cm->cm_callout);
 2007         mtx_assert(&sc->mps_mtx, MA_OWNED);
 2008 
 2009         sassc = sc->sassc;
 2010         ccb = cm->cm_complete_data;
 2011         csio = &ccb->csio;
 2012         target_id = csio->ccb_h.target_id;
 2013         rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
 2014         /*
 2015          * XXX KDM if the chain allocation fails, does it matter if we do
 2016          * the sync and unload here?  It is simpler to do it in every case,
 2017          * assuming it doesn't cause problems.
 2018          */
 2019         if (cm->cm_data != NULL) {
 2020                 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
 2021                         dir = BUS_DMASYNC_POSTREAD;
 2022                 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
 2023                         dir = BUS_DMASYNC_POSTWRITE;
 2024                 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
 2025                 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
 2026         }
 2027 
 2028         cm->cm_targ->completed++;
 2029         cm->cm_targ->outstanding--;
 2030         TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
 2031         ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
 2032 
 2033 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
 2034         if (ccb->csio.bio != NULL)
 2035                 biotrack(ccb->csio.bio, __func__);
 2036 #endif
 2037 
 2038         if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
 2039                 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
 2040                 cm->cm_state = MPS_CM_STATE_BUSY;
 2041                 if (cm->cm_reply != NULL)
 2042                         mpssas_log_command(cm, MPS_RECOVERY,
 2043                             "completed timedout cm %p ccb %p during recovery "
 2044                             "ioc %x scsi %x state %x xfer %u\n",
 2045                             cm, cm->cm_ccb, le16toh(rep->IOCStatus),
 2046                             rep->SCSIStatus, rep->SCSIState,
 2047                             le32toh(rep->TransferCount));
 2048                 else
 2049                         mpssas_log_command(cm, MPS_RECOVERY,
 2050                             "completed timedout cm %p ccb %p during recovery\n",
 2051                             cm, cm->cm_ccb);
 2052         } else if (cm->cm_targ->tm != NULL) {
 2053                 if (cm->cm_reply != NULL)
 2054                         mpssas_log_command(cm, MPS_RECOVERY,
 2055                             "completed cm %p ccb %p during recovery "
 2056                             "ioc %x scsi %x state %x xfer %u\n",
 2057                             cm, cm->cm_ccb, le16toh(rep->IOCStatus),
 2058                             rep->SCSIStatus, rep->SCSIState,
 2059                             le32toh(rep->TransferCount));
 2060                 else
 2061                         mpssas_log_command(cm, MPS_RECOVERY,
 2062                             "completed cm %p ccb %p during recovery\n",
 2063                             cm, cm->cm_ccb);
 2064         } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
 2065                 mpssas_log_command(cm, MPS_RECOVERY,
 2066                     "reset completed cm %p ccb %p\n",
 2067                     cm, cm->cm_ccb);
 2068         }
 2069 
 2070         if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 2071                 /*
 2072                  * We ran into an error after we tried to map the command,
 2073                  * so we're getting a callback without queueing the command
 2074                  * to the hardware.  So we set the status here, and it will
 2075                  * be retained below.  We'll go through the "fast path",
 2076                  * because there can be no reply when we haven't actually
 2077                  * gone out to the hardware.
 2078                  */
 2079                 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
 2080 
 2081                 /*
 2082                  * Currently the only error included in the mask is
 2083                  * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
 2084                  * chain frames.  We need to freeze the queue until we get
 2085                  * a command that completed without this error, which will
 2086                  * hopefully have some chain frames attached that we can
 2087                  * use.  If we wanted to get smarter about it, we would
 2088                  * only unfreeze the queue in this condition when we're
 2089                  * sure that we're getting some chain frames back.  That's
 2090                  * probably unnecessary.
 2091                  */
 2092                 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
 2093                         xpt_freeze_simq(sassc->sim, 1);
 2094                         sassc->flags |= MPSSAS_QUEUE_FROZEN;
 2095                         mps_dprint(sc, MPS_XINFO, "Error sending command, "
 2096                                    "freezing SIM queue\n");
 2097                 }
 2098         }
 2099 
 2100         /*
 2101          * If this is a Start Stop Unit command and it was issued by the driver
 2102          * during shutdown, decrement the refcount to account for all of the
 2103          * commands that were sent.  All SSU commands should be completed before
 2104          * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
 2105          * is TRUE.
 2106          */
 2107         if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
 2108                 mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
 2109                 sc->SSU_refcount--;
 2110         }
 2111 
 2112         /* Take the fast path to completion */
 2113         if (cm->cm_reply == NULL) {
 2114                 if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
 2115                         if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
 2116                                 mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
 2117                         else {
 2118                                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 2119                                 ccb->csio.scsi_status = SCSI_STATUS_OK;
 2120                         }
 2121                         if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
 2122                                 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 2123                                 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
 2124                                 mps_dprint(sc, MPS_XINFO,
 2125                                     "Unfreezing SIM queue\n");
 2126                         }
 2127                 } 
 2128 
 2129                 /*
 2130                  * There are two scenarios where the status won't be
 2131                  * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
 2132                  * set, the second is the MPS_FLAGS_DIAGRESET case above.
 2133                  */
 2134                 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
 2135                         /*
 2136                          * Freeze the dev queue so that commands are
 2137                          * executed in the correct order after error
 2138                          * recovery.
 2139                          */
 2140                         ccb->ccb_h.status |= CAM_DEV_QFRZN;
 2141                         xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
 2142                 }
 2143                 mps_free_command(sc, cm);
 2144                 xpt_done(ccb);
 2145                 return;
 2146         }
 2147 
 2148         mpssas_log_command(cm, MPS_XINFO,
 2149             "ioc %x scsi %x state %x xfer %u\n",
 2150             le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
 2151             le32toh(rep->TransferCount));
 2152 
 2153         /*
 2154          * If this is a Direct Drive I/O, reissue the I/O to the original IR
 2155          * Volume if an error occurred (normal I/O retry).  Use the original
 2156          * CCB, but set a flag that this will be a retry so that it's sent to
 2157          * the original volume.  Free the command but reuse the CCB.
 2158          */
 2159         if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
 2160                 mps_free_command(sc, cm);
 2161                 ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
 2162                 mpssas_action_scsiio(sassc, ccb);
 2163                 return;
 2164         } else
 2165                 ccb->ccb_h.sim_priv.entries[0].field = 0;
 2166 
 2167         switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
 2168         case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
 2169                 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
 2170                 /* FALLTHROUGH */
 2171         case MPI2_IOCSTATUS_SUCCESS:
 2172         case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
 2173 
 2174                 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
 2175                     MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
 2176                         mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
 2177 
 2178                 /* Completion failed at the transport level. */
 2179                 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
 2180                     MPI2_SCSI_STATE_TERMINATED)) {
 2181                         mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2182                         break;
 2183                 }
 2184 
 2185                 /* In a modern packetized environment, an autosense failure
 2186                  * implies that there's not much else that can be done to
 2187                  * recover the command.
 2188                  */
 2189                 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
 2190                         mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
 2191                         break;
 2192                 }
 2193 
 2194                 /*
 2195                  * CAM doesn't care about SAS Response Info data, but if this
 2196                  * state is set, check whether TLR should be done.  If not, clear the
 2197                  * TLR_bits for the target.
 2198                  */
 2199                 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
 2200                     ((le32toh(rep->ResponseInfo) &
 2201                     MPI2_SCSI_RI_MASK_REASONCODE) ==
 2202                     MPS_SCSI_RI_INVALID_FRAME)) {
 2203                         sc->mapping_table[target_id].TLR_bits =
 2204                             (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
 2205                 }
 2206 
 2207                 /*
 2208                  * Intentionally override the normal SCSI status reporting
 2209                  * for these two cases.  These are likely to happen in a
 2210                  * multi-initiator environment, and we want to make sure that
 2211                  * CAM retries these commands rather than fail them.
 2212                  */
 2213                 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
 2214                     (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
 2215                         mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
 2216                         break;
 2217                 }
 2218 
 2219                 /* Handle normal status and sense */
 2220                 csio->scsi_status = rep->SCSIStatus;
 2221                 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
 2222                         mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 2223                 else
 2224                         mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
 2225 
 2226                 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
 2227                         int sense_len, returned_sense_len;
 2228 
 2229                         returned_sense_len = min(le32toh(rep->SenseCount),
 2230                             sizeof(struct scsi_sense_data));
 2231                         if (returned_sense_len < ccb->csio.sense_len)
 2232                                 ccb->csio.sense_resid = ccb->csio.sense_len -
 2233                                         returned_sense_len;
 2234                         else
 2235                                 ccb->csio.sense_resid = 0;
 2236 
 2237                         sense_len = min(returned_sense_len,
 2238                             ccb->csio.sense_len - ccb->csio.sense_resid);
 2239                         bzero(&ccb->csio.sense_data,
 2240                               sizeof(ccb->csio.sense_data));
 2241                         bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
 2242                         ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
 2243                 }
 2244 
 2245                 /*
 2246                  * Check if this is an INQUIRY command.  If it's a VPD inquiry,
 2247                  * and it's page code 0 (Supported Page List), and there is
 2248                  * inquiry data, and this is for a sequential access device, and
 2249                  * the device is an SSP target, and TLR is supported by the
 2250                  * controller, turn the TLR_bits value ON if page 0x90 is
 2251                  * supported.
 2252                  */
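                /*
                 * (Page 0x90 is the Protocol Specific Logical Unit
                 * Information VPD page, which is where an SSP target
                 * reports support for transport layer retries.)
                 */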
 2253                 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
 2254                     (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
 2255                     (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
 2256                     ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
 2257                     (csio->data_ptr != NULL) &&
 2258                     ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
 2259                     (sc->control_TLR) &&
 2260                     (sc->mapping_table[target_id].device_info &
 2261                     MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
 2262                         vpd_list = (struct scsi_vpd_supported_page_list *)
 2263                             csio->data_ptr;
 2264                         TLR_bits = &sc->mapping_table[target_id].TLR_bits;
 2265                         *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
 2266                         TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
 2267                         alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
 2268                             csio->cdb_io.cdb_bytes[4];
 2269                         alloc_len -= csio->resid;
 2270                         for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
 2271                                 if (vpd_list->list[i] == 0x90) {
 2272                                         *TLR_bits = TLR_on;
 2273                                         break;
 2274                                 }
 2275                         }
 2276                 }
 2277 
 2278                 /*
 2279                  * If this is a SATA direct-access end device, mark it so that
 2280                  * a SCSI StartStopUnit command will be sent to it when the
 2281                  * driver is being shutdown.
 2282                  */
 2283                 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
 2284                     ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
 2285                     (sc->mapping_table[target_id].device_info &
 2286                     MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
 2287                     ((sc->mapping_table[target_id].device_info &
 2288                     MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
 2289                     MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
 2290                         target = &sassc->targets[target_id];
 2291                         target->supports_SSU = TRUE;
 2292                         mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
 2293                             target_id);
 2294                 }
 2295                 break;
 2296         case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
 2297         case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
 2298                 /*
 2299                  * If devinfo is 0 this will be a volume.  In that case don't
 2300                  * tell CAM that the volume is not there.  We want volumes to
 2301                  * be enumerated until they are deleted/removed, not just
 2302                  * failed.
 2303                  */
 2304                 if (cm->cm_targ->devinfo == 0)
 2305                         mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 2306                 else
 2307                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 2308                 break;
 2309         case MPI2_IOCSTATUS_INVALID_SGL:
 2310                 mps_print_scsiio_cmd(sc, cm);
 2311                 mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
 2312                 break;
 2313         case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
 2314                 /*
 2315                  * This is one of the responses that comes back when an I/O
 2316                  * has been aborted.  If it is because of a timeout that we
 2317                  * initiated, just set the status to CAM_CMD_TIMEOUT.
 2318                  * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
 2319                  * command is the same (it gets retried, subject to the
 2320                  * retry counter), the only difference is what gets printed
 2321                  * on the console.
 2322                  */
 2323                 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
 2324                         mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
 2325                 else
 2326                         mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
 2327                 break;
 2328         case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
 2329                 /* resid is ignored for this condition */
 2330                 csio->resid = 0;
 2331                 mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
 2332                 break;
 2333         case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
 2334         case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
 2335                 /*
 2336                  * These can sometimes be transient transport-related
 2337                  * errors, and sometimes persistent drive-related errors.
 2338                  * We used to retry these without decrementing the retry
 2339                  * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
 2340                  * we hit a persistent drive problem that returns one of
 2341                  * these error codes, we would retry indefinitely.  So,
 2342                  * return CAM_REQ_CMP_ERROR so that we decrement the retry
 2343                  * count and avoid infinite retries.  We're taking the
 2344                  * potential risk of flagging false failures in the event
 2345                  * of a topology-related error (e.g. a SAS expander problem
 2346                  * causes a command addressed to a drive to fail), but
 2347                  * avoiding getting into an infinite retry loop.
 2348                  */
 2349                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2350                 mps_dprint(sc, MPS_INFO,
 2351                     "Controller reported %s tgt %u SMID %u loginfo %x\n",
 2352                     mps_describe_table(mps_iocstatus_string,
 2353                     le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
 2354                     target_id, cm->cm_desc.Default.SMID,
 2355                     le32toh(rep->IOCLogInfo));
 2356                 mps_dprint(sc, MPS_XINFO,
 2357                     "SCSIStatus %x SCSIState %x xfercount %u\n",
 2358                     rep->SCSIStatus, rep->SCSIState,
 2359                     le32toh(rep->TransferCount));
 2360                 break;
 2361         case MPI2_IOCSTATUS_INVALID_FUNCTION:
 2362         case MPI2_IOCSTATUS_INTERNAL_ERROR:
 2363         case MPI2_IOCSTATUS_INVALID_VPID:
 2364         case MPI2_IOCSTATUS_INVALID_FIELD:
 2365         case MPI2_IOCSTATUS_INVALID_STATE:
 2366         case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
 2367         case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
 2368         case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
 2369         case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
 2370         case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
 2371         default:
 2372                 mpssas_log_command(cm, MPS_XINFO,
 2373                     "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
 2374                     le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
 2375                     rep->SCSIStatus, rep->SCSIState,
 2376                     le32toh(rep->TransferCount));
 2377                 csio->resid = cm->cm_length;
 2378                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2379                 break;
 2380         }
 2381         
 2382         mps_sc_failed_io_info(sc, csio, rep);
 2383 
 2384         if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
 2385                 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 2386                 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
 2387                 mps_dprint(sc, MPS_XINFO, "Command completed, "
 2388                     "unfreezing SIM queue\n");
 2389         }
 2390 
 2391         if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
 2392                 ccb->ccb_h.status |= CAM_DEV_QFRZN;
 2393                 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
 2394         }
 2395 
 2396         mps_free_command(sc, cm);
 2397         xpt_done(ccb);
 2398 }
 2399 
 2400 /* All requests that reach here are endian safe */
 2401 static void
 2402 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
 2403     union ccb *ccb) {
 2404         pMpi2SCSIIORequest_t    pIO_req;
 2405         struct mps_softc        *sc = sassc->sc;
 2406         uint64_t                virtLBA;
 2407         uint32_t                physLBA, stripe_offset, stripe_unit;
 2408         uint32_t                io_size, column;
 2409         uint8_t                 *ptrLBA, lba_idx, physLBA_byte, *CDB;
 2410 
 2411         /*
 2412          * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
 2413          * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
 2414          * will be sent to the IR volume itself.  Since Read6 and Write6 are a
 2415          * bit different from the 10/16 CDBs, handle them separately.
 2416          */
 2417         pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
 2418         CDB = pIO_req->CDB.CDB32;
 2419 
 2420         /*
 2421          * Handle 6 byte CDBs.
 2422          */
 2423         if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
 2424             (CDB[0] == WRITE_6))) {
 2425                 /*
 2426                  * Get the transfer size in blocks.
 2427                  */
 2428                 io_size = (cm->cm_length >> sc->DD_block_exponent);
 2429 
 2430                 /*
 2431                  * Get virtual LBA given in the CDB.
 2432                  */
 2433                 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
 2434                     ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
 2435 
 2436                 /*
 2437                  * Check that LBA range for I/O does not exceed volume's
 2438                  * MaxLBA.
 2439                  */
 2440                 if ((virtLBA + (uint64_t)io_size - 1) <=
 2441                     sc->DD_max_lba) {
 2442                         /*
 2443                          * Check if the I/O crosses a stripe boundary.  If not,
 2444                          * translate the virtual LBA to a physical LBA and set
 2445                          * the DevHandle for the PhysDisk to be used.  If it
 2446                          * does cross a boundary, do normal I/O.  To get the
 2447                          * right DevHandle to use, get the map number for the
 2448                          * column, then use that map number to look up the
 2449                          * DevHandle of the PhysDisk.
 2450                          */
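                        /*
                         * For illustration only (assumed geometry, not read
                         * from the hardware): with 128-block stripes
                         * (DD_stripe_exponent = 7) across 3 PhysDisks,
                         * virtLBA = 1000 and io_size = 8 give
                         * stripe_offset = 1000 & 127 = 104 (and 104 + 8 <=
                         * 128, so no boundary is crossed), stripe number
                         * 1000 >> 7 = 7, stripe_unit = 7 / 3 = 2,
                         * column = 7 % 3 = 1, and a rewritten
                         * physLBA = (2 << 7) + 104 = 360 on that column's
                         * PhysDisk.
                         */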
 2451                         stripe_offset = (uint32_t)virtLBA &
 2452                             (sc->DD_stripe_size - 1);
 2453                         if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
 2454                                 physLBA = (uint32_t)virtLBA >>
 2455                                     sc->DD_stripe_exponent;
 2456                                 stripe_unit = physLBA / sc->DD_num_phys_disks;
 2457                                 column = physLBA % sc->DD_num_phys_disks;
 2458                                 pIO_req->DevHandle =
 2459                                     htole16(sc->DD_column_map[column].dev_handle);
 2460                                 /* XXX Is this endian-safe? */
 2461                                 cm->cm_desc.SCSIIO.DevHandle =
 2462                                     pIO_req->DevHandle;
 2463 
 2464                                 physLBA = (stripe_unit <<
 2465                                     sc->DD_stripe_exponent) + stripe_offset;
 2466                                 ptrLBA = &pIO_req->CDB.CDB32[1];
 2467                                 physLBA_byte = (uint8_t)(physLBA >> 16);
 2468                                 *ptrLBA = physLBA_byte;
 2469                                 ptrLBA = &pIO_req->CDB.CDB32[2];
 2470                                 physLBA_byte = (uint8_t)(physLBA >> 8);
 2471                                 *ptrLBA = physLBA_byte;
 2472                                 ptrLBA = &pIO_req->CDB.CDB32[3];
 2473                                 physLBA_byte = (uint8_t)physLBA;
 2474                                 *ptrLBA = physLBA_byte;
 2475 
 2476                                 /*
 2477                                  * Set flag that Direct Drive I/O is
 2478                                  * being done.
 2479                                  */
 2480                                 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
 2481                         }
 2482                 }
 2483                 return;
 2484         }
 2485 
 2486         /*
 2487          * Handle 10, 12 or 16 byte CDBs.
 2488          */
 2489         if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
 2490             (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
 2491             (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
 2492             (CDB[0] == WRITE_12))) {
 2493                 /*
  2494                  * For 16-byte CDBs, verify that the upper 4 bytes of the CDB
  2495                  * are 0.  If not, this access is beyond 2TB, so handle it in
  2496                  * the else section.  10-byte and 12-byte CDBs are OK.
  2497                  * FreeBSD rarely issues 12-byte READ/WRITE commands, but the
  2498                  * driver is ready to accept 12-byte CDBs for Direct I/O.
 2499                  */
 2500                 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
 2501                     (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
 2502                     !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
 2503                         /*
 2504                          * Get the transfer size in blocks.
 2505                          */
 2506                         io_size = (cm->cm_length >> sc->DD_block_exponent);
 2507 
 2508                         /*
 2509                          * Get virtual LBA.  Point to correct lower 4 bytes of
 2510                          * LBA in the CDB depending on command.
 2511                          */
 2512                         lba_idx = ((CDB[0] == READ_12) || 
 2513                                 (CDB[0] == WRITE_12) ||
 2514                                 (CDB[0] == READ_10) ||
 2515                                 (CDB[0] == WRITE_10))? 2 : 6;
 2516                         virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
 2517                             ((uint64_t)CDB[lba_idx + 1] << 16) |
 2518                             ((uint64_t)CDB[lba_idx + 2] << 8) |
 2519                             (uint64_t)CDB[lba_idx + 3];
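                               /*
                                * For 10- and 12-byte CDBs the 4-byte LBA starts at
                                * byte 2; for 16-byte CDBs only the low 4 LBA bytes
                                * (bytes 6-9) are used, since the upper 4 bytes were
                                * already verified to be zero above.
                                */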
 2520 
 2521                         /*
 2522                          * Check that LBA range for I/O does not exceed volume's
 2523                          * MaxLBA.
 2524                          */
 2525                         if ((virtLBA + (uint64_t)io_size - 1) <=
 2526                             sc->DD_max_lba) {
 2527                                 /*
 2528                                  * Check if the I/O crosses a stripe boundary.
 2529                                  * If not, translate the virtual LBA to a
 2530                                  * physical LBA and set the DevHandle for the
 2531                                  * PhysDisk to be used.  If it does cross a
 2532                                  * boundary, do normal I/O.  To get the right
 2533                                  * DevHandle to use, get the map number for the
 2534                                  * column, then use that map number to look up
 2535                                  * the DevHandle of the PhysDisk.
 2536                                  */
 2537                                 stripe_offset = (uint32_t)virtLBA &
 2538                                     (sc->DD_stripe_size - 1);
 2539                                 if ((stripe_offset + io_size) <=
 2540                                     sc->DD_stripe_size) {
 2541                                         physLBA = (uint32_t)virtLBA >>
 2542                                             sc->DD_stripe_exponent;
 2543                                         stripe_unit = physLBA /
 2544                                             sc->DD_num_phys_disks;
 2545                                         column = physLBA %
 2546                                             sc->DD_num_phys_disks;
 2547                                         pIO_req->DevHandle =
 2548                                             htole16(sc->DD_column_map[column].
 2549                                             dev_handle);
 2550                                         cm->cm_desc.SCSIIO.DevHandle =
 2551                                             pIO_req->DevHandle;
 2552 
 2553                                         physLBA = (stripe_unit <<
 2554                                             sc->DD_stripe_exponent) +
 2555                                             stripe_offset;
 2556                                         ptrLBA =
 2557                                             &pIO_req->CDB.CDB32[lba_idx];
 2558                                         physLBA_byte = (uint8_t)(physLBA >> 24);
 2559                                         *ptrLBA = physLBA_byte;
 2560                                         ptrLBA =
 2561                                             &pIO_req->CDB.CDB32[lba_idx + 1];
 2562                                         physLBA_byte = (uint8_t)(physLBA >> 16);
 2563                                         *ptrLBA = physLBA_byte;
 2564                                         ptrLBA =
 2565                                             &pIO_req->CDB.CDB32[lba_idx + 2];
 2566                                         physLBA_byte = (uint8_t)(physLBA >> 8);
 2567                                         *ptrLBA = physLBA_byte;
 2568                                         ptrLBA =
 2569                                             &pIO_req->CDB.CDB32[lba_idx + 3];
 2570                                         physLBA_byte = (uint8_t)physLBA;
 2571                                         *ptrLBA = physLBA_byte;
 2572 
 2573                                         /*
 2574                                          * Set flag that Direct Drive I/O is
 2575                                          * being done.
 2576                                          */
 2577                                         cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
 2578                                 }
 2579                         }
 2580                 } else {
 2581                         /*
 2582                          * 16-byte CDB and the upper 4 bytes of the CDB are not
 2583                          * 0.  Get the transfer size in blocks.
 2584                          */
 2585                         io_size = (cm->cm_length >> sc->DD_block_exponent);
 2586 
 2587                         /*
 2588                          * Get virtual LBA.
 2589                          */
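                               /*
                                * READ(16)/WRITE(16) CDBs carry the full 64-bit LBA
                                * big-endian in bytes 2 through 9, so the most
                                * significant byte (CDB[2]) is shifted left by 56.
                                */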
  2590                         virtLBA = ((uint64_t)CDB[2] << 56) |
 2591                             ((uint64_t)CDB[3] << 48) |
 2592                             ((uint64_t)CDB[4] << 40) |
 2593                             ((uint64_t)CDB[5] << 32) |
 2594                             ((uint64_t)CDB[6] << 24) |
 2595                             ((uint64_t)CDB[7] << 16) |
 2596                             ((uint64_t)CDB[8] << 8) |
 2597                             (uint64_t)CDB[9]; 
 2598 
 2599                         /*
 2600                          * Check that LBA range for I/O does not exceed volume's
 2601                          * MaxLBA.
 2602                          */
 2603                         if ((virtLBA + (uint64_t)io_size - 1) <=
 2604                             sc->DD_max_lba) {
 2605                                 /*
 2606                                  * Check if the I/O crosses a stripe boundary.
 2607                                  * If not, translate the virtual LBA to a
 2608                                  * physical LBA and set the DevHandle for the
 2609                                  * PhysDisk to be used.  If it does cross a
 2610                                  * boundary, do normal I/O.  To get the right
 2611                                  * DevHandle to use, get the map number for the
 2612                                  * column, then use that map number to look up
 2613                                  * the DevHandle of the PhysDisk.
 2614                                  */
 2615                                 stripe_offset = (uint32_t)virtLBA &
 2616                                     (sc->DD_stripe_size - 1);
 2617                                 if ((stripe_offset + io_size) <=
 2618                                     sc->DD_stripe_size) {
 2619                                         physLBA = (uint32_t)(virtLBA >>
 2620                                             sc->DD_stripe_exponent);
 2621                                         stripe_unit = physLBA /
 2622                                             sc->DD_num_phys_disks;
 2623                                         column = physLBA %
 2624                                             sc->DD_num_phys_disks;
 2625                                         pIO_req->DevHandle =
 2626                                             htole16(sc->DD_column_map[column].
 2627                                             dev_handle);
 2628                                         cm->cm_desc.SCSIIO.DevHandle =
 2629                                             pIO_req->DevHandle;
 2630 
 2631                                         physLBA = (stripe_unit <<
 2632                                             sc->DD_stripe_exponent) +
 2633                                             stripe_offset;
 2634 
 2635                                         /*
 2636                                          * Set upper 4 bytes of LBA to 0.  We
  2637                                          * assume that each phys disk is smaller
  2638                                          * than 2 TB.  Then, set the
 2639                                          * lower 4 bytes.
 2640                                          */
 2641                                         pIO_req->CDB.CDB32[2] = 0;
 2642                                         pIO_req->CDB.CDB32[3] = 0;
 2643                                         pIO_req->CDB.CDB32[4] = 0;
 2644                                         pIO_req->CDB.CDB32[5] = 0;
 2645                                         ptrLBA = &pIO_req->CDB.CDB32[6];
 2646                                         physLBA_byte = (uint8_t)(physLBA >> 24);
 2647                                         *ptrLBA = physLBA_byte;
 2648                                         ptrLBA = &pIO_req->CDB.CDB32[7];
 2649                                         physLBA_byte = (uint8_t)(physLBA >> 16);
 2650                                         *ptrLBA = physLBA_byte;
 2651                                         ptrLBA = &pIO_req->CDB.CDB32[8];
 2652                                         physLBA_byte = (uint8_t)(physLBA >> 8);
 2653                                         *ptrLBA = physLBA_byte;
 2654                                         ptrLBA = &pIO_req->CDB.CDB32[9];
 2655                                         physLBA_byte = (uint8_t)physLBA;
 2656                                         *ptrLBA = physLBA_byte;
 2657 
 2658                                         /*
 2659                                          * Set flag that Direct Drive I/O is
 2660                                          * being done.
 2661                                          */
 2662                                         cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
 2663                                 }
 2664                         }
 2665                 }
 2666         }
 2667 }
 2668 
 2669 #if __FreeBSD_version >= 900026
 2670 static void
 2671 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
 2672 {
 2673         MPI2_SMP_PASSTHROUGH_REPLY *rpl;
 2674         MPI2_SMP_PASSTHROUGH_REQUEST *req;
 2675         uint64_t sasaddr;
 2676         union ccb *ccb;
 2677 
 2678         ccb = cm->cm_complete_data;
 2679 
 2680         /*
 2681          * Currently there should be no way we can hit this case.  It only
 2682          * happens when we have a failure to allocate chain frames, and SMP
 2683          * commands require two S/G elements only.  That should be handled
 2684          * in the standard request size.
 2685          */
 2686         if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 2687                 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
 2688                            __func__, cm->cm_flags);
 2689                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2690                 goto bailout;
 2691         }
 2692 
 2693         rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
 2694         if (rpl == NULL) {
 2695                 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
 2696                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2697                 goto bailout;
 2698         }
 2699 
 2700         req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
 2701         sasaddr = le32toh(req->SASAddress.Low);
 2702         sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
 2703 
 2704         if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
 2705             MPI2_IOCSTATUS_SUCCESS ||
 2706             rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
 2707                 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
 2708                     __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
 2709                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 2710                 goto bailout;
 2711         }
 2712 
 2713         mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
 2714                    "%#jx completed successfully\n", __func__,
 2715                    (uintmax_t)sasaddr);
 2716 
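               /*
                * Byte 2 of an SMP response frame is the FUNCTION RESULT field;
                * SMP_FR_ACCEPTED indicates the SMP function was accepted by the
                * target.
                */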
 2717         if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
 2718                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 2719         else
 2720                 mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
 2721 
 2722 bailout:
 2723         /*
 2724          * We sync in both directions because we had DMAs in the S/G list
 2725          * in both directions.
 2726          */
 2727         bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
 2728                         BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2729         bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
 2730         mps_free_command(sc, cm);
 2731         xpt_done(ccb);
 2732 }
 2733 
 2734 static void
 2735 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
 2736 {
 2737         struct mps_command *cm;
 2738         uint8_t *request, *response;
 2739         MPI2_SMP_PASSTHROUGH_REQUEST *req;
 2740         struct mps_softc *sc;
 2741         int error;
 2742 
 2743         sc = sassc->sc;
 2744         error = 0;
 2745 
 2746         /*
 2747          * XXX We don't yet support physical addresses here.
 2748          */
 2749         switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
 2750         case CAM_DATA_PADDR:
 2751         case CAM_DATA_SG_PADDR:
 2752                 mps_dprint(sc, MPS_ERROR,
 2753                            "%s: physical addresses not supported\n", __func__);
 2754                 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
 2755                 xpt_done(ccb);
 2756                 return;
 2757         case CAM_DATA_SG:
 2758                 /*
 2759                  * The chip does not support more than one buffer for the
 2760                  * request or response.
 2761                  */
 2762                 if ((ccb->smpio.smp_request_sglist_cnt > 1)
 2763                   || (ccb->smpio.smp_response_sglist_cnt > 1)) {
 2764                         mps_dprint(sc, MPS_ERROR,
 2765                                    "%s: multiple request or response "
 2766                                    "buffer segments not supported for SMP\n",
 2767                                    __func__);
 2768                         mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
 2769                         xpt_done(ccb);
 2770                         return;
 2771                 }
 2772 
 2773                 /*
 2774                  * The CAM_SCATTER_VALID flag was originally implemented
 2775                  * for the XPT_SCSI_IO CCB, which only has one data pointer.
 2776                  * We have two.  So, just take that flag to mean that we
 2777                  * might have S/G lists, and look at the S/G segment count
 2778                  * to figure out whether that is the case for each individual
 2779                  * buffer.
 2780                  */
 2781                 if (ccb->smpio.smp_request_sglist_cnt != 0) {
 2782                         bus_dma_segment_t *req_sg;
 2783 
 2784                         req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
 2785                         request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
 2786                 } else
 2787                         request = ccb->smpio.smp_request;
 2788 
 2789                 if (ccb->smpio.smp_response_sglist_cnt != 0) {
 2790                         bus_dma_segment_t *rsp_sg;
 2791 
 2792                         rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
 2793                         response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
 2794                 } else
 2795                         response = ccb->smpio.smp_response;
 2796                 break;
 2797         case CAM_DATA_VADDR:
 2798                 request = ccb->smpio.smp_request;
 2799                 response = ccb->smpio.smp_response;
 2800                 break;
 2801         default:
 2802                 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
 2803                 xpt_done(ccb);
 2804                 return;
 2805         }
 2806 
 2807         cm = mps_alloc_command(sc);
 2808         if (cm == NULL) {
 2809                 mps_dprint(sc, MPS_ERROR,
 2810                     "%s: cannot allocate command\n", __func__);
 2811                 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
 2812                 xpt_done(ccb);
 2813                 return;
 2814         }
 2815 
 2816         req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
 2817         bzero(req, sizeof(*req));
 2818         req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
 2819 
 2820         /* Allow the chip to use any route to this SAS address. */
 2821         req->PhysicalPort = 0xff;
 2822 
 2823         req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
 2824         req->SGLFlags = 
 2825             MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
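       /*
        * The SGL for this request is built into the request frame by
        * mpi_init_sge() below; these flags mark it as an MPI-format SGL
        * whose addresses are in host (system) memory space.
        */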
 2826 
 2827         mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
 2828             "address %#jx\n", __func__, (uintmax_t)sasaddr);
 2829 
 2830         mpi_init_sge(cm, req, &req->SGL);
 2831 
 2832         /*
 2833          * Set up a uio to pass into mps_map_command().  This allows us to
 2834          * do one map command, and one busdma call in there.
 2835          */
 2836         cm->cm_uio.uio_iov = cm->cm_iovec;
 2837         cm->cm_uio.uio_iovcnt = 2;
 2838         cm->cm_uio.uio_segflg = UIO_SYSSPACE;
 2839 
 2840         /*
 2841          * The read/write flag isn't used by busdma, but set it just in
 2842          * case.  This isn't exactly accurate, either, since we're going in
 2843          * both directions.
 2844          */
 2845         cm->cm_uio.uio_rw = UIO_WRITE;
 2846 
 2847         cm->cm_iovec[0].iov_base = request;
 2848         cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
 2849         cm->cm_iovec[1].iov_base = response;
 2850         cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
 2851 
 2852         cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
 2853                                cm->cm_iovec[1].iov_len;
 2854 
 2855         /*
 2856          * Trigger a warning message in mps_data_cb() for the user if we
 2857          * wind up exceeding two S/G segments.  The chip expects one
 2858          * segment for the request and another for the response.
 2859          */
 2860         cm->cm_max_segs = 2;
 2861 
 2862         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 2863         cm->cm_complete = mpssas_smpio_complete;
 2864         cm->cm_complete_data = ccb;
 2865 
 2866         /*
 2867          * Tell the mapping code that we're using a uio, and that this is
 2868          * an SMP passthrough request.  There is a little special-case
 2869          * logic there (in mps_data_cb()) to handle the bidirectional
 2870          * transfer.  
 2871          */
 2872         cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
 2873                         MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
 2874 
 2875         /* The chip data format is little endian. */
 2876         req->SASAddress.High = htole32(sasaddr >> 32);
 2877         req->SASAddress.Low = htole32(sasaddr);
 2878 
 2879         /*
 2880          * XXX Note that we don't have a timeout/abort mechanism here.
 2881          * From the manual, it looks like task management requests only
 2882          * work for SCSI IO and SATA passthrough requests.  We may need to
 2883          * have a mechanism to retry requests in the event of a chip reset
  2884          * at least.  Hopefully the chip will ensure that any errors short
 2885          * of that are relayed back to the driver.
 2886          */
 2887         error = mps_map_command(sc, cm);
 2888         if ((error != 0) && (error != EINPROGRESS)) {
 2889                 mps_dprint(sc, MPS_ERROR,
 2890                            "%s: error %d returned from mps_map_command()\n",
 2891                            __func__, error);
 2892                 goto bailout_error;
 2893         }
 2894 
 2895         return;
 2896 
 2897 bailout_error:
 2898         mps_free_command(sc, cm);
 2899         mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
 2900         xpt_done(ccb);
 2901         return;
 2902 
 2903 }
 2904 
 2905 static void
 2906 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
 2907 {
 2908         struct mps_softc *sc;
 2909         struct mpssas_target *targ;
 2910         uint64_t sasaddr = 0;
 2911 
 2912         sc = sassc->sc;
 2913 
 2914         /*
 2915          * Make sure the target exists.
 2916          */
 2917         KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
 2918             ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
 2919         targ = &sassc->targets[ccb->ccb_h.target_id];
 2920         if (targ->handle == 0x0) {
 2921                 mps_dprint(sc, MPS_ERROR,
 2922                            "%s: target %d does not exist!\n", __func__,
 2923                            ccb->ccb_h.target_id);
 2924                 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
 2925                 xpt_done(ccb);
 2926                 return;
 2927         }
 2928 
 2929         /*
 2930          * If this device has an embedded SMP target, we'll talk to it
  2931          * directly.  Otherwise, figure out what the expander's
  2932          * address is.
 2933          */
 2934         if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
 2935                 sasaddr = targ->sasaddr;
 2936 
 2937         /*
 2938          * If we don't have a SAS address for the expander yet, try
 2939          * grabbing it from the page 0x83 information cached in the
 2940          * transport layer for this target.  LSI expanders report the
 2941          * expander SAS address as the port-associated SAS address in
 2942          * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
 2943          * 0x83.
 2944          *
 2945          * XXX KDM disable this for now, but leave it commented out so that
 2946          * it is obvious that this is another possible way to get the SAS
 2947          * address.
 2948          *
 2949          * The parent handle method below is a little more reliable, and
 2950          * the other benefit is that it works for devices other than SES
 2951          * devices.  So you can send a SMP request to a da(4) device and it
 2952          * will get routed to the expander that device is attached to.
 2953          * (Assuming the da(4) device doesn't contain an SMP target...)
 2954          */
 2955 #if 0
 2956         if (sasaddr == 0)
 2957                 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
 2958 #endif
 2959 
 2960         /*
 2961          * If we still don't have a SAS address for the expander, look for
 2962          * the parent device of this device, which is probably the expander.
 2963          */
 2964         if (sasaddr == 0) {
 2965 #ifdef OLD_MPS_PROBE
 2966                 struct mpssas_target *parent_target;
 2967 #endif
 2968 
 2969                 if (targ->parent_handle == 0x0) {
 2970                         mps_dprint(sc, MPS_ERROR,
 2971                                    "%s: handle %d does not have a valid "
 2972                                    "parent handle!\n", __func__, targ->handle);
 2973                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 2974                         goto bailout;
 2975                 }
 2976 #ifdef OLD_MPS_PROBE
 2977                 parent_target = mpssas_find_target_by_handle(sassc, 0,
 2978                         targ->parent_handle);
 2979 
 2980                 if (parent_target == NULL) {
 2981                         mps_dprint(sc, MPS_ERROR,
 2982                                    "%s: handle %d does not have a valid "
 2983                                    "parent target!\n", __func__, targ->handle);
 2984                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 2985                         goto bailout;
 2986                 }
 2987 
 2988                 if ((parent_target->devinfo &
 2989                      MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
 2990                         mps_dprint(sc, MPS_ERROR,
 2991                                    "%s: handle %d parent %d does not "
 2992                                    "have an SMP target!\n", __func__,
 2993                                    targ->handle, parent_target->handle);
 2994                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 2995                         goto bailout;
 2996 
 2997                 }
 2998 
 2999                 sasaddr = parent_target->sasaddr;
 3000 #else /* OLD_MPS_PROBE */
 3001                 if ((targ->parent_devinfo &
 3002                      MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
 3003                         mps_dprint(sc, MPS_ERROR,
 3004                                    "%s: handle %d parent %d does not "
 3005                                    "have an SMP target!\n", __func__,
 3006                                    targ->handle, targ->parent_handle);
 3007                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 3008                         goto bailout;
 3009 
 3010                 }
 3011                 if (targ->parent_sasaddr == 0x0) {
 3012                         mps_dprint(sc, MPS_ERROR,
 3013                                    "%s: handle %d parent handle %d does "
 3014                                    "not have a valid SAS address!\n",
 3015                                    __func__, targ->handle, targ->parent_handle);
 3016                         mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 3017                         goto bailout;
 3018                 }
 3019 
 3020                 sasaddr = targ->parent_sasaddr;
 3021 #endif /* OLD_MPS_PROBE */
 3022 
 3023         }
 3024 
 3025         if (sasaddr == 0) {
 3026                 mps_dprint(sc, MPS_INFO,
 3027                            "%s: unable to find SAS address for handle %d\n",
 3028                            __func__, targ->handle);
 3029                 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
 3030                 goto bailout;
 3031         }
 3032         mpssas_send_smpcmd(sassc, ccb, sasaddr);
 3033 
 3034         return;
 3035 
 3036 bailout:
 3037         xpt_done(ccb);
 3038 
 3039 }
  3040 #endif /* __FreeBSD_version >= 900026 */
 3041 
 3042 static void
 3043 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
 3044 {
 3045         MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 3046         struct mps_softc *sc;
 3047         struct mps_command *tm;
 3048         struct mpssas_target *targ;
 3049 
 3050         MPS_FUNCTRACE(sassc->sc);
 3051         mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
 3052 
 3053         KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
 3054             ("Target %d out of bounds in XPT_RESET_DEV\n",
 3055              ccb->ccb_h.target_id));
 3056         sc = sassc->sc;
 3057         tm = mps_alloc_command(sc);
 3058         if (tm == NULL) {
 3059                 mps_dprint(sc, MPS_ERROR,
 3060                     "command alloc failure in mpssas_action_resetdev\n");
 3061                 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
 3062                 xpt_done(ccb);
 3063                 return;
 3064         }
 3065 
 3066         targ = &sassc->targets[ccb->ccb_h.target_id];
 3067         req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 3068         req->DevHandle = htole16(targ->handle);
 3069         req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 3070         req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
 3071 
 3072         /* SAS Hard Link Reset / SATA Link Reset */
 3073         req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
 3074 
 3075         tm->cm_data = NULL;
 3076         tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
 3077         tm->cm_complete = mpssas_resetdev_complete;
 3078         tm->cm_complete_data = ccb;
 3079         tm->cm_targ = targ;
 3080         targ->flags |= MPSSAS_TARGET_INRESET;
 3081 
 3082         mps_map_command(sc, tm);
 3083 }
 3084 
 3085 static void
 3086 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
 3087 {
 3088         MPI2_SCSI_TASK_MANAGE_REPLY *resp;
 3089         union ccb *ccb;
 3090 
 3091         MPS_FUNCTRACE(sc);
 3092         mtx_assert(&sc->mps_mtx, MA_OWNED);
 3093 
 3094         resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
 3095         ccb = tm->cm_complete_data;
 3096 
 3097         /*
 3098          * Currently there should be no way we can hit this case.  It only
 3099          * happens when we have a failure to allocate chain frames, and
 3100          * task management commands don't have S/G lists.
 3101          */
 3102         if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 3103                 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
 3104 
 3105                 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 3106 
 3107                 mps_dprint(sc, MPS_ERROR,
 3108                            "%s: cm_flags = %#x for reset of handle %#04x! "
 3109                            "This should not happen!\n", __func__, tm->cm_flags,
 3110                            req->DevHandle);
 3111                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 3112                 goto bailout;
 3113         }
 3114 
 3115         mps_dprint(sc, MPS_XINFO,
 3116             "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
 3117             le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
 3118 
 3119         if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
 3120                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
 3121                 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
 3122                     CAM_LUN_WILDCARD);
 3123         }
 3124         else
 3125                 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
 3126 
 3127 bailout:
 3128 
 3129         mpssas_free_tm(sc, tm);
 3130         xpt_done(ccb);
 3131 }
 3132 
 3133 static void
 3134 mpssas_poll(struct cam_sim *sim)
 3135 {
 3136         struct mpssas_softc *sassc;
 3137 
 3138         sassc = cam_sim_softc(sim);
 3139 
 3140         if (sassc->sc->mps_debug & MPS_TRACE) {
 3141                 /* frequent debug messages during a panic just slow
 3142                  * everything down too much.
 3143                  */
 3144                 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
 3145                 sassc->sc->mps_debug &= ~MPS_TRACE;
 3146         }
 3147 
 3148         mps_intr_locked(sassc->sc);
 3149 }
 3150 
 3151 static void
 3152 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
 3153              void *arg)
 3154 {
 3155         struct mps_softc *sc;
 3156 
 3157         sc = (struct mps_softc *)callback_arg;
 3158 
 3159         switch (code) {
 3160 #if (__FreeBSD_version >= 1000006) || \
 3161     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
 3162         case AC_ADVINFO_CHANGED: {
 3163                 struct mpssas_target *target;
 3164                 struct mpssas_softc *sassc;
 3165                 struct scsi_read_capacity_data_long rcap_buf;
 3166                 struct ccb_dev_advinfo cdai;
 3167                 struct mpssas_lun *lun;
 3168                 lun_id_t lunid;
 3169                 int found_lun;
 3170                 uintptr_t buftype;
 3171 
 3172                 buftype = (uintptr_t)arg;
 3173 
 3174                 found_lun = 0;
 3175                 sassc = sc->sassc;
 3176 
 3177                 /*
 3178                  * We're only interested in read capacity data changes.
 3179                  */
 3180                 if (buftype != CDAI_TYPE_RCAPLONG)
 3181                         break;
 3182 
 3183                 /*
 3184                  * We should have a handle for this, but check to make sure.
 3185                  */
 3186                 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
 3187                     ("Target %d out of bounds in mpssas_async\n",
 3188                     xpt_path_target_id(path)));
 3189                 target = &sassc->targets[xpt_path_target_id(path)];
 3190                 if (target->handle == 0)
 3191                         break;
 3192 
 3193                 lunid = xpt_path_lun_id(path);
 3194 
 3195                 SLIST_FOREACH(lun, &target->luns, lun_link) {
 3196                         if (lun->lun_id == lunid) {
 3197                                 found_lun = 1;
 3198                                 break;
 3199                         }
 3200                 }
 3201 
 3202                 if (found_lun == 0) {
 3203                         lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
 3204                                      M_NOWAIT | M_ZERO);
 3205                         if (lun == NULL) {
 3206                                 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
 3207                                            "LUN for EEDP support.\n");
 3208                                 break;
 3209                         }
 3210                         lun->lun_id = lunid;
 3211                         SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
 3212                 }
 3213 
 3214                 bzero(&rcap_buf, sizeof(rcap_buf));
 3215                 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 3216                 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 3217                 cdai.ccb_h.flags = CAM_DIR_IN;
 3218                 cdai.buftype = CDAI_TYPE_RCAPLONG;
 3219 #if (__FreeBSD_version >= 1100061) || \
 3220     ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
 3221                 cdai.flags = CDAI_FLAG_NONE;
 3222 #else
 3223                 cdai.flags = 0;
 3224 #endif
 3225                 cdai.bufsiz = sizeof(rcap_buf);
 3226                 cdai.buf = (uint8_t *)&rcap_buf;
 3227                 xpt_action((union ccb *)&cdai);
 3228                 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
 3229                         cam_release_devq(cdai.ccb_h.path,
 3230                                          0, 0, 0, FALSE);
 3231 
 3232                 if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
 3233                  && (rcap_buf.prot & SRC16_PROT_EN)) {
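                               /*
                                * PROT_EN is set, so look at the protection type.
                                * Types 1 and 3 can be used with the ordinary
                                * 10/12/16-byte CDBs this driver issues; Type 2
                                * generally requires the 32-byte CDB variants and
                                * is treated as not EEDP-formatted here.
                                */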
 3234                         switch (rcap_buf.prot & SRC16_P_TYPE) {
 3235                         case SRC16_PTYPE_1:
 3236                         case SRC16_PTYPE_3:
 3237                                 lun->eedp_formatted = TRUE;
 3238                                 lun->eedp_block_size =
 3239                                     scsi_4btoul(rcap_buf.length);
 3240                                 break;
 3241                         case SRC16_PTYPE_2:
 3242                         default:
 3243                                 lun->eedp_formatted = FALSE;
 3244                                 lun->eedp_block_size = 0;
 3245                                 break;
 3246                         }
 3247                 } else {
 3248                         lun->eedp_formatted = FALSE;
 3249                         lun->eedp_block_size = 0;
 3250                 }
 3251                 break;
 3252         }
 3253 #else
 3254         case AC_FOUND_DEVICE: {
 3255                 struct ccb_getdev *cgd;
 3256 
 3257                 cgd = arg;
 3258                 mpssas_check_eedp(sc, path, cgd);
 3259                 break;
 3260         }
 3261 #endif
 3262         default:
 3263                 break;
 3264         }
 3265 }
 3266 
 3267 #if (__FreeBSD_version < 901503) || \
 3268     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
 3269 static void
 3270 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
 3271                   struct ccb_getdev *cgd)
 3272 {
 3273         struct mpssas_softc *sassc = sc->sassc;
 3274         struct ccb_scsiio *csio;
 3275         struct scsi_read_capacity_16 *scsi_cmd;
 3276         struct scsi_read_capacity_eedp *rcap_buf;
 3277         path_id_t pathid;
 3278         target_id_t targetid;
 3279         lun_id_t lunid;
 3280         union ccb *ccb;
 3281         struct cam_path *local_path;
 3282         struct mpssas_target *target;
 3283         struct mpssas_lun *lun;
 3284         uint8_t found_lun;
 3285         char path_str[64];
 3286 
 3287         sassc = sc->sassc;
 3288         pathid = cam_sim_path(sassc->sim);
 3289         targetid = xpt_path_target_id(path);
 3290         lunid = xpt_path_lun_id(path);
 3291 
 3292         KASSERT(targetid < sassc->maxtargets,
 3293             ("Target %d out of bounds in mpssas_check_eedp\n",
 3294              targetid));
 3295         target = &sassc->targets[targetid];
 3296         if (target->handle == 0x0)
 3297                 return;
 3298 
 3299         /*
 3300          * Determine if the device is EEDP capable.
 3301          *
  3302          * If the SPC3_SID_PROTECT flag is set in the inquiry
  3303          * data, the device supports protection information
  3304          * and must support the 16-byte READ CAPACITY
  3305          * command; otherwise, continue without sending
  3306          * READ CAPACITY(16).
 3307          */
 3308         if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
 3309                 return;
 3310 
 3311         /*
 3312          * Issue a READ CAPACITY 16 command.  This info
 3313          * is used to determine if the LUN is formatted
 3314          * for EEDP support.
 3315          */
 3316         ccb = xpt_alloc_ccb_nowait();
 3317         if (ccb == NULL) {
 3318                 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
 3319                     "for EEDP support.\n");
 3320                 return;
 3321         }
 3322 
 3323         if (xpt_create_path(&local_path, xpt_periph,
 3324             pathid, targetid, lunid) != CAM_REQ_CMP) {
 3325                 mps_dprint(sc, MPS_ERROR, "Unable to create "
 3326                     "path for EEDP support\n");
 3327                 xpt_free_ccb(ccb);
 3328                 return;
 3329         }
 3330 
 3331         /*
 3332          * If LUN is already in list, don't create a new
 3333          * one.
 3334          */
 3335         found_lun = FALSE;
 3336         SLIST_FOREACH(lun, &target->luns, lun_link) {
 3337                 if (lun->lun_id == lunid) {
 3338                         found_lun = TRUE;
 3339                         break;
 3340                 }
 3341         }
 3342         if (!found_lun) {
 3343                 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
 3344                     M_NOWAIT | M_ZERO);
 3345                 if (lun == NULL) {
 3346                         mps_dprint(sc, MPS_ERROR,
 3347                             "Unable to alloc LUN for EEDP support.\n");
 3348                         xpt_free_path(local_path);
 3349                         xpt_free_ccb(ccb);
 3350                         return;
 3351                 }
 3352                 lun->lun_id = lunid;
 3353                 SLIST_INSERT_HEAD(&target->luns, lun,
 3354                     lun_link);
 3355         }
 3356 
 3357         xpt_path_string(local_path, path_str, sizeof(path_str));
 3358 
 3359         mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
 3360             path_str, target->handle);
 3361 
 3362         /*
 3363          * Issue a READ CAPACITY 16 command for the LUN.
 3364          * The mpssas_read_cap_done function will load
 3365          * the read cap info into the LUN struct.
 3366          */
 3367         rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
 3368             M_MPT2, M_NOWAIT | M_ZERO);
 3369         if (rcap_buf == NULL) {
 3370                 mps_dprint(sc, MPS_FAULT,
 3371                     "Unable to alloc read capacity buffer for EEDP support.\n");
 3372                 xpt_free_path(ccb->ccb_h.path);
 3373                 xpt_free_ccb(ccb);
 3374                 return;
 3375         }
 3376         xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
 3377         csio = &ccb->csio;
 3378         csio->ccb_h.func_code = XPT_SCSI_IO;
 3379         csio->ccb_h.flags = CAM_DIR_IN;
 3380         csio->ccb_h.retry_count = 4;    
 3381         csio->ccb_h.cbfcnp = mpssas_read_cap_done;
 3382         csio->ccb_h.timeout = 60000;
 3383         csio->data_ptr = (uint8_t *)rcap_buf;
 3384         csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
 3385         csio->sense_len = MPS_SENSE_LEN;
 3386         csio->cdb_len = sizeof(*scsi_cmd);
 3387         csio->tag_action = MSG_SIMPLE_Q_TAG;
 3388 
 3389         scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
 3390         bzero(scsi_cmd, sizeof(*scsi_cmd));
 3391         scsi_cmd->opcode = 0x9E;
 3392         scsi_cmd->service_action = SRC16_SERVICE_ACTION;
 3393         ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
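       /*
        * Opcode 0x9E is SERVICE ACTION IN(16); combined with
        * SRC16_SERVICE_ACTION this forms a READ CAPACITY(16) command.
        * Byte 13 is the least significant byte of the allocation length,
        * which is sufficient for the small buffer requested here.
        */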
 3394 
 3395         ccb->ccb_h.ppriv_ptr1 = sassc;
 3396         xpt_action(ccb);
 3397 }
 3398 
 3399 static void
 3400 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
 3401 {
 3402         struct mpssas_softc *sassc;
 3403         struct mpssas_target *target;
 3404         struct mpssas_lun *lun;
 3405         struct scsi_read_capacity_eedp *rcap_buf;
 3406 
 3407         if (done_ccb == NULL)
 3408                 return;
 3409         
  3410         /* The driver needs to release the devq itself when the SCSI
  3411          * command was generated internally by the driver.
  3412          * Currently there is a single place where the driver issues
  3413          * a SCSI command internally.  If the driver issues more
  3414          * internal SCSI commands in the future, it must release the
  3415          * devq for those as well, since such commands will not go
  3416          * back through cam_periph.
  3417          */
 3418         if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
 3419                 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
 3420                 xpt_release_devq(done_ccb->ccb_h.path,
 3421                                 /*count*/ 1, /*run_queue*/TRUE);
 3422         }
 3423 
 3424         rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
 3425 
 3426         /*
 3427          * Get the LUN ID for the path and look it up in the LUN list for the
 3428          * target.
 3429          */
 3430         sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
 3431         KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
 3432             ("Target %d out of bounds in mpssas_read_cap_done\n",
 3433              done_ccb->ccb_h.target_id));
 3434         target = &sassc->targets[done_ccb->ccb_h.target_id];
 3435         SLIST_FOREACH(lun, &target->luns, lun_link) {
 3436                 if (lun->lun_id != done_ccb->ccb_h.target_lun)
 3437                         continue;
 3438 
 3439                 /*
 3440                  * Got the LUN in the target's LUN list.  Fill it in
 3441                  * with EEDP info.  If the READ CAP 16 command had some
 3442                  * SCSI error (common if command is not supported), mark
 3443                  * the lun as not supporting EEDP and set the block size
 3444                  * to 0.
 3445                  */
 3446                 if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
 3447                  || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
 3448                         lun->eedp_formatted = FALSE;
 3449                         lun->eedp_block_size = 0;
 3450                         break;
 3451                 }
 3452 
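               /*
                * Bit 0 of the protect byte returned by READ CAPACITY(16)
                * is PROT_EN; when set, the LUN has been formatted with
                * protection information.
                */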
 3453                 if (rcap_buf->protect & 0x01) {
 3454                         mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 3455                             "target ID %d is formatted for EEDP "
 3456                             "support.\n", done_ccb->ccb_h.target_lun,
 3457                             done_ccb->ccb_h.target_id);
 3458                         lun->eedp_formatted = TRUE;
 3459                         lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
 3460                 }
 3461                 break;
 3462         }
 3463 
 3464         // Finished with this CCB and path.
 3465         free(rcap_buf, M_MPT2);
 3466         xpt_free_path(done_ccb->ccb_h.path);
 3467         xpt_free_ccb(done_ccb);
 3468 }
 3469 #endif /* (__FreeBSD_version < 901503) || \
 3470           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
 3471 
 3472 void
 3473 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
 3474     struct mpssas_target *target, lun_id_t lun_id)
 3475 {
 3476         union ccb *ccb;
 3477         path_id_t path_id;
 3478 
 3479         /*
 3480          * Set the INRESET flag for this target so that no I/O will be sent to
 3481          * the target until the reset has completed.  If an I/O request does
 3482          * happen, the devq will be frozen.  The CCB holds the path which is
 3483          * used to release the devq.  The devq is released and the CCB is freed
 3484          * when the TM completes.
 3485          */
 3486         ccb = xpt_alloc_ccb_nowait();
 3487         if (ccb) {
 3488                 path_id = cam_sim_path(sc->sassc->sim);
 3489                 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
 3490                     target->tid, lun_id) != CAM_REQ_CMP) {
 3491                         xpt_free_ccb(ccb);
 3492                 } else {
 3493                         tm->cm_ccb = ccb;
 3494                         tm->cm_targ = target;
 3495                         target->flags |= MPSSAS_TARGET_INRESET;
 3496                 }
 3497         }
 3498 }
 3499 
 3500 int
 3501 mpssas_startup(struct mps_softc *sc)
 3502 {
 3503 
 3504         /*
 3505          * Send the port enable message and set the wait_for_port_enable flag.
 3506          * This flag helps to keep the simq frozen until all discovery events
 3507          * are processed.
 3508          */
 3509         sc->wait_for_port_enable = 1;
 3510         mpssas_send_portenable(sc);
 3511         return (0);
 3512 }
 3513 
 3514 static int
 3515 mpssas_send_portenable(struct mps_softc *sc)
 3516 {
 3517         MPI2_PORT_ENABLE_REQUEST *request;
 3518         struct mps_command *cm;
 3519 
 3520         MPS_FUNCTRACE(sc);
 3521 
 3522         if ((cm = mps_alloc_command(sc)) == NULL)
 3523                 return (EBUSY);
 3524         request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
 3525         request->Function = MPI2_FUNCTION_PORT_ENABLE;
 3526         request->MsgFlags = 0;
 3527         request->VP_ID = 0;
 3528         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 3529         cm->cm_complete = mpssas_portenable_complete;
 3530         cm->cm_data = NULL;
 3531         cm->cm_sge = NULL;
 3532 
 3533         mps_map_command(sc, cm);
 3534         mps_dprint(sc, MPS_XINFO, 
 3535             "mps_send_portenable finished cm %p req %p complete %p\n",
 3536             cm, cm->cm_req, cm->cm_complete);
 3537         return (0);
 3538 }
 3539 
 3540 static void
 3541 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
 3542 {
 3543         MPI2_PORT_ENABLE_REPLY *reply;
 3544         struct mpssas_softc *sassc;
 3545 
 3546         MPS_FUNCTRACE(sc);
 3547         sassc = sc->sassc;
 3548 
 3549         /*
 3550          * Currently there should be no way we can hit this case.  It only
 3551          * happens when we have a failure to allocate chain frames, and
 3552          * port enable commands don't have S/G lists.
 3553          */
 3554         if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 3555                 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
 3556                            "This should not happen!\n", __func__, cm->cm_flags);
 3557         }
 3558 
 3559         reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
 3560         if (reply == NULL)
 3561                 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
  3562         else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
 3563             MPI2_IOCSTATUS_SUCCESS)
 3564                 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
 3565 
 3566         mps_free_command(sc, cm);
 3567 
 3568         /*
 3569          * Get WarpDrive info after discovery is complete but before the scan
 3570          * starts.  At this point, all devices are ready to be exposed to the
 3571          * OS.  If devices should be hidden instead, take them out of the
 3572          * 'targets' array before the scan.  The devinfo for a disk will have
 3573          * some info and a volume's will be 0.  Use that to remove disks.
 3574          */
 3575         mps_wd_config_pages(sc);
 3576 
 3577         /*
 3578          * Done waiting for port enable to complete.  Decrement the refcount.
 3579          * If refcount is 0, discovery is complete and a rescan of the bus can
 3580          * take place.  Since the simq was explicitly frozen before port
 3581          * enable, it must be explicitly released here to keep the
 3582          * freeze/release count in sync.
 3583          */
 3584         sc->wait_for_port_enable = 0;
 3585         sc->port_enable_complete = 1;
 3586         wakeup(&sc->port_enable_complete);
 3587         mpssas_startup_decrement(sassc);
 3588 }
 3589 
 3590 int
 3591 mpssas_check_id(struct mpssas_softc *sassc, int id)
 3592 {
 3593         struct mps_softc *sc = sassc->sc;
 3594         char *ids;
 3595         char *name;
 3596 
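       /*
        * exclude_ids is expected to hold a comma-separated list of target
        * IDs (for example, "1,5"); return 1 if the given ID appears in
        * that list.
        */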
 3597         ids = &sc->exclude_ids[0];
 3598         while((name = strsep(&ids, ",")) != NULL) {
 3599                 if (name[0] == '\0')
 3600                         continue;
 3601                 if (strtol(name, NULL, 0) == (long)id)
 3602                         return (1);
 3603         }
 3604 
 3605         return (0);
 3606 }
 3607 
 3608 void
 3609 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
 3610 {
 3611         struct mpssas_softc *sassc;
 3612         struct mpssas_lun *lun, *lun_tmp;
 3613         struct mpssas_target *targ;
 3614         int i;
 3615 
 3616         sassc = sc->sassc;
 3617         /*
 3618          * The number of targets is based on IOC Facts, so free all of
 3619          * the allocated LUNs for each target and then the target buffer
 3620          * itself.
 3621          */
 3622         for (i=0; i< maxtargets; i++) {
 3623                 targ = &sassc->targets[i];
 3624                 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
 3625                         free(lun, M_MPT2);
 3626                 }
 3627         }
 3628         free(sassc->targets, M_MPT2);
 3629 
 3630         sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
 3631             M_MPT2, M_WAITOK|M_ZERO);
 3632         if (!sassc->targets) {
 3633                 panic("%s failed to alloc targets with error %d\n",
 3634                     __func__, ENOMEM);
 3635         }
 3636 }
