FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c

/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/md_var.h>     /* geometry translation */
#include <machine/stdarg.h>     /* for xpt_print below */

#include "opt_cam.h"

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
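
/*
 * Override sketch (hedged): a custom kernel can raise this default,
 * assuming the CAM_MAX_HIGHPOWER option is plumbed into opt_cam.h by
 * the build, e.g. in the kernel config:
 *
 *        options CAM_MAX_HIGHPOWER=8
 */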

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
        struct task     task;
        void            *data1;
        uintptr_t       data2;
};

struct xpt_softc {
        uint32_t                xpt_generation;

        /* number of high powered commands that can go through right now */
        struct mtx              xpt_highpower_lock;
        STAILQ_HEAD(highpowerlist, cam_ed)      highpowerq;
        int                     num_highpower;

        /* queue for handling async rescan requests. */
        TAILQ_HEAD(, ccb_hdr) ccb_scanq;
        int buses_to_config;
        int buses_config_done;

        /* Registered busses */
        TAILQ_HEAD(,cam_eb)     xpt_busses;
        u_int                   bus_generation;

        struct intr_config_hook *xpt_config_hook;

        int                     boot_delay;
        struct callout          boot_callout;

        struct mtx              xpt_topo_lock;
        struct mtx              xpt_lock;
        struct taskqueue        *xpt_taskq;
};

typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;
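
/*
 * A dev_match_ret packs a flag nibble and an action nibble; the match
 * routines below compose them, e.g. (DM_RET_DESCEND | DM_RET_COPY)
 * means "copy this node out and keep descending the EDT", and callers
 * recover the action with (retval & DM_RET_ACTION_MASK).
 */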

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth      depth;
        void                    *tr_func;
        void                    *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);

TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
           &xsoftc.boot_delay, 0, "Bus registration wait time");
SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
            &xsoftc.xpt_generation, 0, "CAM peripheral generation count");
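
/*
 * Usage sketch (hedged): boot_delay is taken in milliseconds, so a user
 * whose buses need an extra ten seconds to settle might add the
 * following to /boot/loader.conf:
 *
 *        kern.cam.boot_delay="10000"
 */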

struct cam_doneq {
        struct mtx_padalign     cam_doneq_mtx;
        STAILQ_HEAD(, ccb_hdr)  cam_doneq;
        int                     cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static int cam_num_doneqs;
static struct proc *cam_proc;

TUNABLE_INT("kern.cam.num_doneqs", &cam_num_doneqs);
SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
           &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
        CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_open =       xptopen,
        .d_close =      xptclose,
        .d_ioctl =      xptioctl,
        .d_name =       "xpt",
};

/* Storage for debugging data structures */
struct cam_path *cam_dpath;
u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
TUNABLE_INT("kern.cam.dflags", &cam_dflags);
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
        &cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
        &cam_debug_delay, 0, "Delay in us after each debug message");
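
/*
 * Usage sketch (hedged): both knobs can also be changed at runtime,
 * e.g. to enable a CAM_DEBUG_* mask from cam_debug.h and slow the
 * output down to one message per millisecond:
 *
 *        # sysctl kern.cam.dflags=<mask>
 *        # sysctl kern.cam.debug_delay=1000
 */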

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
        "cam",
        cam_module_event_handler,
        NULL
};

static int      xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static void             xpt_async_bcast(struct async_list *async_head,
                                        u_int32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void      xpt_run_allocq(struct cam_periph *periph, int sleep);
static void      xpt_run_allocq_task(void *context, int pending);
static void      xpt_run_devq(struct cam_devq *devq);
static timeout_t xpt_release_devq_timeout;
static void      xpt_release_simq_timeout(void *arg) __unused;
static void      xpt_acquire_bus(struct cam_eb *bus);
static void      xpt_release_bus(struct cam_eb *bus);
static uint32_t  xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int       xpt_release_devq_device(struct cam_ed *dev, u_int count,
                    int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_acquire_target(struct cam_et *target);
static void      xpt_release_target(struct cam_et *target);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_config(void *arg);
static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                                 u_int32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static void      camisr_runqueue(void);
static void      xpt_done_process(struct ccb_hdr *ccb_h);
static void      xpt_done_td(void *);
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static void             xpt_finishconfig_task(void *context, int pending);
static void             xpt_dev_async_default(u_int32_t async_code,
                                              struct cam_eb *bus,
                                              struct cam_et *target,
                                              struct cam_ed *device,
                                              void *async_arg);
static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
                                                 struct cam_et *target,
                                                 lun_id_t lun_id);
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);
static __inline int device_is_queued(struct cam_ed *device);

static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
        int     retval;

        mtx_assert(&devq->send_mtx, MA_OWNED);
        if ((dev->ccbq.queue.entries > 0) &&
            (dev->ccbq.dev_openings > 0) &&
            (dev->ccbq.queue.qfrozen_cnt == 0)) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&devq->send_queue,
                                     &dev->devq_entry,
                                     CAMQ_GET_PRIO(&dev->ccbq.queue));
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
        return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init(void)
{
        make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

        /*
         * Only allow read-write access.
         */
        if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((flags & O_NONBLOCK) != 0) {
                printf("%s: can't do nonblocking access\n", devtoname(dev));
                return(ENODEV);
        }

        return(0);
}
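
/*
 * Userland sketch (hypothetical snippet, not part of this file): the
 * checks above mean /dev/xpt0 must be opened blocking and read-write:
 *
 *        int fd;
 *
 *        if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
 *                err(1, "open");
 *
 * Opening with O_RDONLY or O_NONBLOCK would fail per the code above.
 */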

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

        return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int error;

        if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
                error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
        }
        return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int error;

        error = 0;

        switch(cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
         * in the CAM spec.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;
                struct cam_eb *bus;

                inccb = (union ccb *)addr;

                bus = xpt_find_bus(inccb->ccb_h.path_id);
                if (bus == NULL)
                        return (EINVAL);

                switch (inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
                            inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
                                xpt_release_bus(bus);
                                return (EINVAL);
                        }
                        break;
                case XPT_SCAN_TGT:
                        if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
                            inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
                                xpt_release_bus(bus);
                                return (EINVAL);
                        }
                        break;
                default:
                        break;
                }

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:
                case XPT_SCAN_TGT:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, NULL,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        xpt_path_lock(ccb->ccb_h.path);
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        xpt_path_unlock(ccb->ccb_h.path);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, NULL,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;

                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
                            CAM_DATA_VADDR) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                xpt_release_bus(bus);
                break;
        }
        /*
         * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST CCB as
         * input, with the peripheral driver name and unit number filled in.
         * The other fields don't really matter as input.  The passthrough
         * driver name ("pass") and unit number are passed back in the CCB,
         * along with the current device generation number, the index into
         * the device peripheral driver list, and the status.  Note that
         * since we do everything in one pass, unlike the XPT_GDEVLIST CCB,
         * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
         * (or rather should be) impossible for the device peripheral driver
         * list to change, since we look at the whole thing in one pass, and
         * we do it with lock protection.
         */
        case CAMGETPASSTHRU: {
                union ccb *ccb;
                struct cam_periph *periph;
                struct periph_driver **p_drv;
                char   *name;
                u_int unit;
                int base_periph_found;

                ccb = (union ccb *)addr;
                unit = ccb->cgdl.unit_number;
                name = ccb->cgdl.periph_name;
                base_periph_found = 0;

                /*
                 * Sanity check -- make sure we don't get a null peripheral
                 * driver name.
                 */
                if (*ccb->cgdl.periph_name == '\0') {
                        error = EINVAL;
                        break;
                }

                /* Keep the list from changing while we traverse it */
                xpt_lock_buses();

                /* first find our driver in the list of drivers */
                for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
                        if (strcmp((*p_drv)->driver_name, name) == 0)
                                break;

                if (*p_drv == NULL) {
                        xpt_unlock_buses();
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        break;
                }

                /*
                 * Run through every peripheral instance of this driver
                 * and check to see whether it matches the unit passed
                 * in by the user.  If it does, get out of the loops and
                 * find the passthrough driver associated with that
                 * peripheral driver.
                 */
                for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
                     periph = TAILQ_NEXT(periph, unit_links)) {

                        if (periph->unit_number == unit)
                                break;
                }
                /*
                 * If we found the peripheral driver that the user passed
                 * in, go through all of the peripheral drivers for that
                 * particular device and look for a passthrough driver.
                 */
                if (periph != NULL) {
                        struct cam_ed *device;
                        int i;

                        base_periph_found = 1;
                        device = periph->path->device;
                        for (i = 0, periph = SLIST_FIRST(&device->periphs);
                             periph != NULL;
                             periph = SLIST_NEXT(periph, periph_links), i++) {
                                /*
                                 * Check to see whether we have a
                                 * passthrough device or not.
                                 */
                                if (strcmp(periph->periph_name, "pass") == 0) {
                                        /*
                                         * Fill in the getdevlist fields.
                                         */
                                        strcpy(ccb->cgdl.periph_name,
                                               periph->periph_name);
                                        ccb->cgdl.unit_number =
                                                periph->unit_number;
                                        if (SLIST_NEXT(periph, periph_links))
                                                ccb->cgdl.status =
                                                        CAM_GDEVLIST_MORE_DEVS;
                                        else
                                                ccb->cgdl.status =
                                                       CAM_GDEVLIST_LAST_DEVICE;
                                        ccb->cgdl.generation =
                                                device->generation;
                                        ccb->cgdl.index = i;
                                        /*
                                         * Fill in some CCB header fields
                                         * that the user may want.
                                         */
                                        ccb->ccb_h.path_id =
                                                periph->path->bus->path_id;
                                        ccb->ccb_h.target_id =
                                                periph->path->target->target_id;
                                        ccb->ccb_h.target_lun =
                                                periph->path->device->lun_id;
                                        ccb->ccb_h.status = CAM_REQ_CMP;
                                        break;
                                }
                        }
                }

                /*
                 * If the periph is null here, one of two things has
                 * happened.  The first possibility is that we couldn't
                 * find the unit number of the particular peripheral driver
                 * that the user is asking about.  e.g. the user asks for
                 * the passthrough driver for "da11".  We find the list of
                 * "da" peripherals all right, but there is no unit 11.
                 * The other possibility is that we went through the list
                 * of peripheral drivers attached to the device structure,
                 * but didn't find one with the name "pass".  Either way,
                 * we return ENOENT, since we couldn't find something.
                 */
                if (periph == NULL) {
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        /*
                         * It is unfortunate that this is even necessary,
                         * but there are many, many clueless users out there.
                         * If this is true, the user is looking for the
                         * passthrough driver, but doesn't have one in his
                         * kernel.
                         */
                        if (base_periph_found == 1) {
                                printf("xptioctl: pass driver is not in the "
                                       "kernel\n");
                                printf("xptioctl: put \"device pass\" in "
                                       "your kernel config file\n");
                        }
                }
                xpt_unlock_buses();
                break;
                }
        default:
                error = ENOTTY;
                break;
        }

        return(error);
}
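
/*
 * Userland sketch (hedged; libcam's cam_open_device() wraps this):
 * resolving the passthrough device for da0 via CAMGETPASSTHRU:
 *
 *        union ccb ccb;
 *
 *        bzero(&ccb, sizeof(ccb));
 *        ccb.ccb_h.func_code = XPT_GDEVLIST;
 *        strlcpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
 *        ccb.cgdl.unit_number = 0;
 *        if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1)
 *                err(1, "CAMGETPASSTHRU");
 *
 * On success ccb.cgdl.periph_name is "pass" and ccb.cgdl.unit_number
 * identifies the passN instance attached to da0.
 */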

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
        int error;

        switch (what) {
        case MOD_LOAD:
                if ((error = xpt_init(NULL)) != 0)
                        return (error);
                break;
        case MOD_UNLOAD:
                return EBUSY;
        default:
                return EOPNOTSUPP;
        }

        return 0;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

        if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
                xpt_free_path(done_ccb->ccb_h.path);
                xpt_free_ccb(done_ccb);
        } else {
                done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
                (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
        }
        xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
        union ccb       *ccb;
        struct cam_path  path;

        xpt_lock_buses();
        for (;;) {
                if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
                        msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
                               "-", 0);
                if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
                        TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
                        xpt_unlock_buses();

                        /*
                         * Since the lock can be dropped inside xpt_action()
                         * and the path freed by the completion callback
                         * even before we return here, take our own copy of
                         * the path for reference.
                         */
                        xpt_copy_path(&path, ccb->ccb_h.path);
                        xpt_path_lock(&path);
                        xpt_action(ccb);
                        xpt_path_unlock(&path);
                        xpt_release_path(&path);

                        xpt_lock_buses();
                }
        }
}

void
xpt_rescan(union ccb *ccb)
{
        struct ccb_hdr *hdr;

        /* Prepare request */
        if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
            ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_BUS;
        else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
            ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_TGT;
        else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
            ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_LUN;
        else {
                xpt_print(ccb->ccb_h.path, "illegal scan path\n");
                xpt_free_path(ccb->ccb_h.path);
                xpt_free_ccb(ccb);
                return;
        }
        ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
        ccb->ccb_h.cbfcnp = xpt_rescan_done;
        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
        /* Don't make duplicate entries for the same paths. */
        xpt_lock_buses();
        if (ccb->ccb_h.ppriv_ptr1 == NULL) {
                TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
                        if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
                                wakeup(&xsoftc.ccb_scanq);
                                xpt_unlock_buses();
                                xpt_print(ccb->ccb_h.path, "rescan already queued\n");
                                xpt_free_path(ccb->ccb_h.path);
                                xpt_free_ccb(ccb);
                                return;
                        }
                }
        }
        TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
        xsoftc.buses_to_config++;
        wakeup(&xsoftc.ccb_scanq);
        xpt_unlock_buses();
}
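
/*
 * Caller sketch (hedged; mirrors the pattern SIM drivers use): to
 * request an asynchronous rescan of an entire bus, hand xpt_rescan() a
 * CCB with a wildcard path; the CCB and path are consumed on completion:
 *
 *        union ccb *ccb;
 *
 *        if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
 *                return;
 *        if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sim),
 *            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *                xpt_free_ccb(ccb);
 *                return;
 *        }
 *        xpt_rescan(ccb);
 */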

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
        struct cam_sim *xpt_sim;
        struct cam_path *path;
        struct cam_devq *devq;
        cam_status status;
        int error, i;

        TAILQ_INIT(&xsoftc.xpt_busses);
        TAILQ_INIT(&xsoftc.ccb_scanq);
        STAILQ_INIT(&xsoftc.highpowerq);
        xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

        mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
        mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
        xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
            taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
        /*
         * Override this value at compile time to assist our users
         * who don't use loader to boot a kernel.
         */
        xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif
        /*
         * The xpt layer is, itself, the equivalent of a SIM.
         * Allow 16 ccbs in the ccb pool for it.  This should
         * give decent parallelism when we probe busses and
         * perform other XPT functions.
         */
        devq = cam_simq_alloc(16);
        xpt_sim = cam_sim_alloc(xptaction,
                                xptpoll,
                                "xpt",
                                /*softc*/NULL,
                                /*unit*/0,
                                /*mtx*/&xsoftc.xpt_lock,
                                /*max_dev_transactions*/0,
                                /*max_tagged_dev_transactions*/0,
                                devq);
        if (xpt_sim == NULL)
                return (ENOMEM);

        mtx_lock(&xsoftc.xpt_lock);
        if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
                mtx_unlock(&xsoftc.xpt_lock);
                printf("xpt_init: xpt_bus_register failed with status %#x,"
                       " failing attach\n", status);
                return (EINVAL);
        }
        mtx_unlock(&xsoftc.xpt_lock);

        /*
         * Looking at the XPT from the SIM layer, the XPT is
         * the equivalent of a peripheral driver.  Allocate
         * a peripheral driver entry for us.
         */
        if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
                                      CAM_TARGET_WILDCARD,
                                      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
                printf("xpt_init: xpt_create_path failed with status %#x,"
                       " failing attach\n", status);
                return (EINVAL);
        }
        xpt_path_lock(path);
        cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
                         path, NULL, 0, xpt_sim);
        xpt_path_unlock(path);
        xpt_free_path(path);

        if (cam_num_doneqs < 1)
                cam_num_doneqs = 1 + mp_ncpus / 6;
        else if (cam_num_doneqs > MAXCPU)
                cam_num_doneqs = MAXCPU;
        for (i = 0; i < cam_num_doneqs; i++) {
                mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
                    MTX_DEF);
                STAILQ_INIT(&cam_doneqs[i].cam_doneq);
                error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
                    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
                if (error != 0) {
                        cam_num_doneqs = i;
                        break;
                }
        }
        if (cam_num_doneqs < 1) {
                printf("xpt_init: Cannot init completion queues "
                       "- failing attach\n");
                return (ENOMEM);
        }
        /*
         * Register a callback for when interrupts are enabled.
         */
        xsoftc.xpt_config_hook =
            (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
                                              M_CAMXPT, M_NOWAIT | M_ZERO);
        if (xsoftc.xpt_config_hook == NULL) {
                printf("xpt_init: Cannot malloc config hook "
                       "- failing attach\n");
                return (ENOMEM);
        }
        xsoftc.xpt_config_hook->ich_func = xpt_config;
        if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
                free(xsoftc.xpt_config_hook, M_CAMXPT);
                printf("xpt_init: config_intrhook_establish failed "
                       "- failing attach\n");
        }

        return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
        struct cam_sim *xpt_sim;

        if (periph == NULL) {
                printf("xptregister: periph was NULL!!\n");
                return(CAM_REQ_CMP_ERR);
        }

        xpt_sim = (struct cam_sim *)arg;
        xpt_sim->softc = periph;
        xpt_periph = periph;
        periph->softc = NULL;

        return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
        struct cam_ed *device;
        int32_t  status;

        TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
        device = periph->path->device;
        status = CAM_REQ_CMP;
        if (device != NULL) {
                mtx_lock(&device->target->bus->eb_mtx);
                device->generation++;
                SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
                mtx_unlock(&device->target->bus->eb_mtx);
                atomic_add_32(&xsoftc.xpt_generation, 1);
        }

        return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
        struct cam_ed *device;

        device = periph->path->device;
        if (device != NULL) {
                mtx_lock(&device->target->bus->eb_mtx);
                device->generation++;
                SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
                mtx_unlock(&device->target->bus->eb_mtx);
                atomic_add_32(&xsoftc.xpt_generation, 1);
        }
}


void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
        struct  cam_path *path = periph->path;

        cam_periph_assert(periph, MA_OWNED);
        periph->flags |= CAM_PERIPH_ANNOUNCED;

        printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
               periph->periph_name, periph->unit_number,
               path->bus->sim->sim_name,
               path->bus->sim->unit_number,
               path->bus->sim->bus_id,
               path->bus->path_id,
               path->target->target_id,
               (uintmax_t)path->device->lun_id);
        printf("%s%d: ", periph->periph_name, periph->unit_number);
        if (path->device->protocol == PROTO_SCSI)
                scsi_print_inquiry(&path->device->inq_data);
        else if (path->device->protocol == PROTO_ATA ||
            path->device->protocol == PROTO_SATAPM)
                ata_print_ident(&path->device->ident_data);
        else if (path->device->protocol == PROTO_SEMB)
                semb_print_ident(
                    (struct sep_identify_data *)&path->device->ident_data);
        else
                printf("Unknown protocol device\n");
        if (path->device->serial_num_len > 0) {
                /* Don't wrap the screen - print only the first 60 chars */
                printf("%s%d: Serial Number %.60s\n", periph->periph_name,
                       periph->unit_number, path->device->serial_num);
        }
        /* Announce transport details. */
        (*(path->bus->xport->announce))(periph);
        /* Announce command queueing. */
        if (path->device->inq_flags & SID_CmdQue
         || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
                printf("%s%d: Command Queueing enabled\n",
                       periph->periph_name, periph->unit_number);
        }
        /* Announce caller's details if they've been passed in. */
        if (announce_string != NULL)
                printf("%s%d: %s\n", periph->periph_name,
                       periph->unit_number, announce_string);
}

void
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
{
        if (quirks != 0) {
                printf("%s%d: quirks=0x%b\n", periph->periph_name,
                    periph->unit_number, quirks, bit_string);
        }
}

void
xpt_denounce_periph(struct cam_periph *periph)
{
        struct  cam_path *path = periph->path;

        cam_periph_assert(periph, MA_OWNED);
        printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
               periph->periph_name, periph->unit_number,
               path->bus->sim->sim_name,
               path->bus->sim->unit_number,
               path->bus->sim->bus_id,
               path->bus->path_id,
               path->target->target_id,
               (uintmax_t)path->device->lun_id);
        printf("%s%d: ", periph->periph_name, periph->unit_number);
        if (path->device->protocol == PROTO_SCSI)
                scsi_print_inquiry_short(&path->device->inq_data);
        else if (path->device->protocol == PROTO_ATA ||
            path->device->protocol == PROTO_SATAPM)
                ata_print_ident_short(&path->device->ident_data);
        else if (path->device->protocol == PROTO_SEMB)
                semb_print_ident_short(
                    (struct sep_identify_data *)&path->device->ident_data);
        else
                printf("Unknown protocol device");
        if (path->device->serial_num_len > 0)
                printf(" s/n %.60s", path->device->serial_num);
        printf(" detached\n");
}


int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
        int ret = -1, l, o;
        struct ccb_dev_advinfo cdai;
        struct scsi_vpd_id_descriptor *idd;

        xpt_path_assert(path, MA_OWNED);

        memset(&cdai, 0, sizeof(cdai));
        xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
        cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
        cdai.flags = CDAI_FLAG_NONE;
        cdai.bufsiz = len;

        if (!strcmp(attr, "GEOM::ident"))
                cdai.buftype = CDAI_TYPE_SERIAL_NUM;
        else if (!strcmp(attr, "GEOM::physpath"))
                cdai.buftype = CDAI_TYPE_PHYS_PATH;
        else if (strcmp(attr, "GEOM::lunid") == 0 ||
                 strcmp(attr, "GEOM::lunname") == 0) {
                cdai.buftype = CDAI_TYPE_SCSI_DEVID;
                cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
        } else
                goto out;

        cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
        if (cdai.buf == NULL) {
                ret = ENOMEM;
                goto out;
        }
        xpt_action((union ccb *)&cdai); /* can only be synchronous */
        if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
                cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
        if (cdai.provsiz == 0)
                goto out;
        if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
                if (strcmp(attr, "GEOM::lunid") == 0) {
                        idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
                            cdai.provsiz, scsi_devid_is_lun_naa);
                        if (idd == NULL)
                                idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
                                    cdai.provsiz, scsi_devid_is_lun_eui64);
                        if (idd == NULL)
                                idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
                                    cdai.provsiz, scsi_devid_is_lun_uuid);
                        if (idd == NULL)
                                idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
                                    cdai.provsiz, scsi_devid_is_lun_md5);
                } else
                        idd = NULL;
                if (idd == NULL)
                        idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
                            cdai.provsiz, scsi_devid_is_lun_t10);
                if (idd == NULL)
                        idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
                            cdai.provsiz, scsi_devid_is_lun_name);
                if (idd == NULL)
                        goto out;
                ret = 0;
                if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) {
                        if (idd->length < len) {
                                for (l = 0; l < idd->length; l++)
                                        buf[l] = idd->identifier[l] ?
                                            idd->identifier[l] : ' ';
                                buf[l] = 0;
                        } else
                                ret = EFAULT;
                } else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
                        l = strnlen(idd->identifier, idd->length);
                        if (l < len) {
                                bcopy(idd->identifier, buf, l);
                                buf[l] = 0;
                        } else
                                ret = EFAULT;
                } else if ((idd->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_UUID
                    && idd->identifier[0] == 0x10) {
                        if ((idd->length - 2) * 2 + 4 < len) {
                                for (l = 2, o = 0; l < idd->length; l++) {
                                        if (l == 6 || l == 8 || l == 10 || l == 12)
                                            o += sprintf(buf + o, "-");
                                        o += sprintf(buf + o, "%02x",
                                            idd->identifier[l]);
                                }
                        } else
                                ret = EFAULT;
                } else {
                        if (idd->length * 2 < len) {
                                for (l = 0; l < idd->length; l++)
                                        sprintf(buf + l * 2, "%02x",
                                            idd->identifier[l]);
                        } else
                                ret = EFAULT;
                }
        } else {
                ret = 0;
                if (strlcpy(buf, cdai.buf, len) >= len)
                        ret = EFAULT;
        }

out:
        if (cdai.buf != NULL)
                free(cdai.buf, M_CAMXPT);
        return ret;
}
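
/*
 * Caller sketch (hedged): peripheral drivers answer GEOM attribute
 * queries this way, holding the path lock as asserted above:
 *
 *        char ident[100];
 *
 *        cam_periph_lock(periph);
 *        if (xpt_getattr(ident, sizeof(ident), "GEOM::ident",
 *            periph->path) == 0)
 *                printf("ident: %s\n", ident);
 *        cam_periph_unlock(periph);
 */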

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
            struct cam_eb *bus)
{
        dev_match_ret retval;
        u_int i;

        retval = DM_RET_NONE;

        /*
         * If we aren't given something to match against, that's an error.
         */
        if (bus == NULL)
                return(DM_RET_ERROR);

        /*
         * If there are no match entries, then this bus matches no
         * matter what.
         */
        if ((patterns == NULL) || (num_patterns == 0))
                return(DM_RET_DESCEND | DM_RET_COPY);

        for (i = 0; i < num_patterns; i++) {
                struct bus_match_pattern *cur_pattern;

                /*
                 * If the pattern in question isn't for a bus node, we
                 * aren't interested.  However, we do indicate to the
                 * calling routine that we should continue descending the
                 * tree, since the user wants to match against lower-level
                 * EDT elements.
                 */
                if (patterns[i].type != DEV_MATCH_BUS) {
                        if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
                                retval |= DM_RET_DESCEND;
                        continue;
                }

                cur_pattern = &patterns[i].pattern.bus_pattern;

                /*
                 * If they want to match any bus node, we give them any
                 * device node.
                 */
                if (cur_pattern->flags == BUS_MATCH_ANY) {
                        /* set the copy flag */
                        retval |= DM_RET_COPY;

                        /*
                         * If we've already decided on an action, go ahead
                         * and return.
                         */
                        if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
                                return(retval);
                }

                /*
                 * Not sure why someone would do this...
                 */
                if (cur_pattern->flags == BUS_MATCH_NONE)
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
                 && (cur_pattern->path_id != bus->path_id))
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
                 && (cur_pattern->bus_id != bus->sim->bus_id))
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
                 && (cur_pattern->unit_number != bus->sim->unit_number))
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
                 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
                             DEV_IDLEN) != 0))
                        continue;

                /*
                 * If we get to this point, the user definitely wants
                 * information on this bus.  So tell the caller to copy the
                 * data out.
                 */
                retval |= DM_RET_COPY;

                /*
                 * If the return action has been set to descend, then we
                 * know that we've already seen a non-bus matching
                 * expression, therefore we need to further descend the tree.
                 * This won't change by continuing around the loop, so we
                 * go ahead and return.  If we haven't seen a non-bus
                 * matching expression, we keep going around the loop until
                 * we exhaust the matching expressions.  We'll set the stop
                 * flag once we fall out of the loop.
                 */
                if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
                        return(retval);
        }

        /*
         * If the return action hasn't been set to descend yet, that means
         * we haven't seen anything other than bus matching patterns.  So
         * tell the caller to stop descending the tree -- the user doesn't
 1306          * want to match against lower level tree elements.
 1307          */
 1308         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1309                 retval |= DM_RET_STOP;
 1310 
 1311         return(retval);
 1312 }
 1313 
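       /*
        * Callers reach xptbusmatch() by passing an array of patterns in an
        * XPT_DEV_MATCH CCB.  A minimal sketch of a helper that fills in one
        * pattern selecting only buses whose SIM is named "ahcich" (the
        * helper and the name are illustrative):
        */
       #if 0	/* illustrative only */
       static void
       example_bus_pattern(struct dev_match_pattern *p)
       {

               memset(p, 0, sizeof(*p));
               p->type = DEV_MATCH_BUS;
               p->pattern.bus_pattern.flags = BUS_MATCH_NAME;
               strlcpy(p->pattern.bus_pattern.dev_name, "ahcich",
                   sizeof(p->pattern.bus_pattern.dev_name));
       }
       #endif
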
 1314 static dev_match_ret
 1315 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1316                struct cam_ed *device)
 1317 {
 1318         dev_match_ret retval;
 1319         u_int i;
 1320 
 1321         retval = DM_RET_NONE;
 1322 
 1323         /*
 1324          * If we aren't given something to match against, that's an error.
 1325          */
 1326         if (device == NULL)
 1327                 return(DM_RET_ERROR);
 1328 
 1329         /*
 1330          * If there are no match entries, then this device matches no
 1331          * matter what.
 1332          */
 1333         if ((patterns == NULL) || (num_patterns == 0))
 1334                 return(DM_RET_DESCEND | DM_RET_COPY);
 1335 
 1336         for (i = 0; i < num_patterns; i++) {
 1337                 struct device_match_pattern *cur_pattern;
 1338                 struct scsi_vpd_device_id *device_id_page;
 1339 
 1340                 /*
 1341                  * If the pattern in question isn't for a device node, we
 1342                  * aren't interested.
 1343                  */
 1344                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1345                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1346                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1347                                 retval |= DM_RET_DESCEND;
 1348                         continue;
 1349                 }
 1350 
 1351                 cur_pattern = &patterns[i].pattern.device_pattern;
 1352 
  1353                 /* Error out if mutually exclusive options are specified. */
 1354                 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
 1355                  == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
 1356                         return(DM_RET_ERROR);
 1357 
 1358                 /*
 1359                  * If they want to match any device node, we give them any
 1360                  * device node.
 1361                  */
 1362                 if (cur_pattern->flags == DEV_MATCH_ANY)
 1363                         goto copy_dev_node;
 1364 
 1365                 /*
 1366                  * Not sure why someone would do this...
 1367                  */
 1368                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1369                         continue;
 1370 
 1371                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1372                  && (cur_pattern->path_id != device->target->bus->path_id))
 1373                         continue;
 1374 
 1375                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1376                  && (cur_pattern->target_id != device->target->target_id))
 1377                         continue;
 1378 
 1379                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1380                  && (cur_pattern->target_lun != device->lun_id))
 1381                         continue;
 1382 
 1383                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1384                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1385                                     (caddr_t)&cur_pattern->data.inq_pat,
 1386                                     1, sizeof(cur_pattern->data.inq_pat),
 1387                                     scsi_static_inquiry_match) == NULL))
 1388                         continue;
 1389 
 1390                 device_id_page = (struct scsi_vpd_device_id *)device->device_id;
 1391                 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
 1392                  && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
 1393                   || scsi_devid_match((uint8_t *)device_id_page->desc_list,
 1394                                       device->device_id_len
 1395                                     - SVPD_DEVICE_ID_HDR_LEN,
 1396                                       cur_pattern->data.devid_pat.id,
 1397                                       cur_pattern->data.devid_pat.id_len) != 0))
 1398                         continue;
 1399 
 1400 copy_dev_node:
 1401                 /*
 1402                  * If we get to this point, the user definitely wants
 1403                  * information on this device.  So tell the caller to copy
 1404                  * the data out.
 1405                  */
 1406                 retval |= DM_RET_COPY;
 1407 
 1408                 /*
 1409                  * If the return action has been set to descend, then we
 1410                  * know that we've already seen a peripheral matching
 1411                  * expression, therefore we need to further descend the tree.
 1412                  * This won't change by continuing around the loop, so we
 1413                  * go ahead and return.  If we haven't seen a peripheral
 1414                  * matching expression, we keep going around the loop until
 1415                  * we exhaust the matching expressions.  We'll set the stop
 1416                  * flag once we fall out of the loop.
 1417                  */
 1418                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1419                         return(retval);
 1420         }
 1421 
 1422         /*
 1423          * If the return action hasn't been set to descend yet, that means
 1424          * we haven't seen any peripheral matching patterns.  So tell the
 1425          * caller to stop descending the tree -- the user doesn't want to
 1426          * match against lower level tree elements.
 1427          */
 1428         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1429                 retval |= DM_RET_STOP;
 1430 
 1431         return(retval);
 1432 }
 1433 
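       /*
        * Device patterns are filled in the same way as the bus pattern
        * sketched after xptbusmatch() above; this illustrative helper pins
        * one device by path, target, and LUN (the IDs are arbitrary):
        */
       #if 0	/* illustrative only */
       static void
       example_device_pattern(struct dev_match_pattern *p)
       {

               memset(p, 0, sizeof(*p));
               p->type = DEV_MATCH_DEVICE;
               p->pattern.device_pattern.flags =
                   DEV_MATCH_PATH | DEV_MATCH_TARGET | DEV_MATCH_LUN;
               p->pattern.device_pattern.path_id = 0;
               p->pattern.device_pattern.target_id = 1;
               p->pattern.device_pattern.target_lun = 0;
       }
       #endif
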
 1434 /*
 1435  * Match a single peripheral against any number of match patterns.
 1436  */
 1437 static dev_match_ret
 1438 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1439                struct cam_periph *periph)
 1440 {
 1441         dev_match_ret retval;
 1442         u_int i;
 1443 
 1444         /*
 1445          * If we aren't given something to match against, that's an error.
 1446          */
 1447         if (periph == NULL)
 1448                 return(DM_RET_ERROR);
 1449 
 1450         /*
 1451          * If there are no match entries, then this peripheral matches no
 1452          * matter what.
 1453          */
 1454         if ((patterns == NULL) || (num_patterns == 0))
 1455                 return(DM_RET_STOP | DM_RET_COPY);
 1456 
 1457         /*
 1458          * There aren't any nodes below a peripheral node, so there's no
 1459          * reason to descend the tree any further.
 1460          */
 1461         retval = DM_RET_STOP;
 1462 
 1463         for (i = 0; i < num_patterns; i++) {
 1464                 struct periph_match_pattern *cur_pattern;
 1465 
 1466                 /*
 1467                  * If the pattern in question isn't for a peripheral, we
 1468                  * aren't interested.
 1469                  */
 1470                 if (patterns[i].type != DEV_MATCH_PERIPH)
 1471                         continue;
 1472 
 1473                 cur_pattern = &patterns[i].pattern.periph_pattern;
 1474 
 1475                 /*
 1476                  * If they want to match on anything, then we will do so.
 1477                  */
 1478                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 1479                         /* set the copy flag */
 1480                         retval |= DM_RET_COPY;
 1481 
 1482                         /*
 1483                          * We've already set the return action to stop,
 1484                          * since there are no nodes below peripherals in
 1485                          * the tree.
 1486                          */
 1487                         return(retval);
 1488                 }
 1489 
 1490                 /*
 1491                  * Not sure why someone would do this...
 1492                  */
 1493                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 1494                         continue;
 1495 
 1496                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 1497                  && (cur_pattern->path_id != periph->path->bus->path_id))
 1498                         continue;
 1499 
 1500                 /*
 1501                  * For the target and lun id's, we have to make sure the
 1502                  * target and lun pointers aren't NULL.  The xpt peripheral
 1503                  * has a wildcard target and device.
 1504                  */
 1505                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 1506                  && ((periph->path->target == NULL)
 1507                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 1508                         continue;
 1509 
 1510                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 1511                  && ((periph->path->device == NULL)
 1512                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 1513                         continue;
 1514 
 1515                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 1516                  && (cur_pattern->unit_number != periph->unit_number))
 1517                         continue;
 1518 
 1519                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 1520                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 1521                              DEV_IDLEN) != 0))
 1522                         continue;
 1523 
 1524                 /*
 1525                  * If we get to this point, the user definitely wants
 1526                  * information on this peripheral.  So tell the caller to
 1527                  * copy the data out.
 1528                  */
 1529                 retval |= DM_RET_COPY;
 1530 
 1531                 /*
 1532                  * The return action has already been set to stop, since
 1533                  * peripherals don't have any nodes below them in the EDT.
 1534                  */
 1535                 return(retval);
 1536         }
 1537 
 1538         /*
 1539          * If we get to this point, the peripheral that was passed in
 1540          * doesn't match any of the patterns.
 1541          */
 1542         return(retval);
 1543 }
 1544 
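       /*
        * Likewise for peripherals: an illustrative helper naming one unit
        * of one driver, e.g. "da" unit 0:
        */
       #if 0	/* illustrative only */
       static void
       example_periph_pattern(struct dev_match_pattern *p)
       {

               memset(p, 0, sizeof(*p));
               p->type = DEV_MATCH_PERIPH;
               p->pattern.periph_pattern.flags =
                   PERIPH_MATCH_NAME | PERIPH_MATCH_UNIT;
               p->pattern.periph_pattern.unit_number = 0;
               strlcpy(p->pattern.periph_pattern.periph_name, "da",
                   sizeof(p->pattern.periph_pattern.periph_name));
       }
       #endif
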
 1545 static int
 1546 xptedtbusfunc(struct cam_eb *bus, void *arg)
 1547 {
 1548         struct ccb_dev_match *cdm;
 1549         struct cam_et *target;
 1550         dev_match_ret retval;
 1551 
 1552         cdm = (struct ccb_dev_match *)arg;
 1553 
 1554         /*
 1555          * If our position is for something deeper in the tree, that means
 1556          * that we've already seen this node.  So, we keep going down.
 1557          */
 1558         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1559          && (cdm->pos.cookie.bus == bus)
 1560          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1561          && (cdm->pos.cookie.target != NULL))
 1562                 retval = DM_RET_DESCEND;
 1563         else
 1564                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 1565 
 1566         /*
 1567          * If we got an error, bail out of the search.
 1568          */
 1569         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1570                 cdm->status = CAM_DEV_MATCH_ERROR;
 1571                 return(0);
 1572         }
 1573 
 1574         /*
 1575          * If the copy flag is set, copy this bus out.
 1576          */
 1577         if (retval & DM_RET_COPY) {
 1578                 int spaceleft, j;
 1579 
 1580                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1581                         sizeof(struct dev_match_result));
 1582 
 1583                 /*
 1584                  * If we don't have enough space to put in another
 1585                  * match result, save our position and tell the
 1586                  * user there are more devices to check.
 1587                  */
 1588                 if (spaceleft < sizeof(struct dev_match_result)) {
 1589                         bzero(&cdm->pos, sizeof(cdm->pos));
 1590                         cdm->pos.position_type =
 1591                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 1592 
 1593                         cdm->pos.cookie.bus = bus;
  1594                         cdm->pos.generations[CAM_BUS_GENERATION] =
 1595                                 xsoftc.bus_generation;
 1596                         cdm->status = CAM_DEV_MATCH_MORE;
 1597                         return(0);
 1598                 }
 1599                 j = cdm->num_matches;
 1600                 cdm->num_matches++;
 1601                 cdm->matches[j].type = DEV_MATCH_BUS;
 1602                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 1603                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 1604                 cdm->matches[j].result.bus_result.unit_number =
 1605                         bus->sim->unit_number;
 1606                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 1607                         bus->sim->sim_name, DEV_IDLEN);
 1608         }
 1609 
 1610         /*
 1611          * If the user is only interested in busses, there's no
 1612          * reason to descend to the next level in the tree.
 1613          */
 1614         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1615                 return(1);
 1616 
 1617         /*
 1618          * If there is a target generation recorded, check it to
 1619          * make sure the target list hasn't changed.
 1620          */
 1621         mtx_lock(&bus->eb_mtx);
 1622         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1623          && (cdm->pos.cookie.bus == bus)
 1624          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1625          && (cdm->pos.cookie.target != NULL)) {
 1626                 if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
 1627                     bus->generation)) {
 1628                         mtx_unlock(&bus->eb_mtx);
 1629                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1630                         return (0);
 1631                 }
 1632                 target = (struct cam_et *)cdm->pos.cookie.target;
 1633                 target->refcount++;
 1634         } else
 1635                 target = NULL;
 1636         mtx_unlock(&bus->eb_mtx);
 1637 
 1638         return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
 1639 }
 1640 
 1641 static int
 1642 xptedttargetfunc(struct cam_et *target, void *arg)
 1643 {
 1644         struct ccb_dev_match *cdm;
 1645         struct cam_eb *bus;
 1646         struct cam_ed *device;
 1647 
 1648         cdm = (struct ccb_dev_match *)arg;
 1649         bus = target->bus;
 1650 
 1651         /*
 1652          * If there is a device list generation recorded, check it to
 1653          * make sure the device list hasn't changed.
 1654          */
 1655         mtx_lock(&bus->eb_mtx);
 1656         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1657          && (cdm->pos.cookie.bus == bus)
 1658          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1659          && (cdm->pos.cookie.target == target)
 1660          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1661          && (cdm->pos.cookie.device != NULL)) {
 1662                 if (cdm->pos.generations[CAM_DEV_GENERATION] !=
 1663                     target->generation) {
 1664                         mtx_unlock(&bus->eb_mtx);
 1665                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1666                         return(0);
 1667                 }
 1668                 device = (struct cam_ed *)cdm->pos.cookie.device;
 1669                 device->refcount++;
 1670         } else
 1671                 device = NULL;
 1672         mtx_unlock(&bus->eb_mtx);
 1673 
 1674         return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
 1675 }
 1676 
 1677 static int
 1678 xptedtdevicefunc(struct cam_ed *device, void *arg)
 1679 {
 1680         struct cam_eb *bus;
 1681         struct cam_periph *periph;
 1682         struct ccb_dev_match *cdm;
 1683         dev_match_ret retval;
 1684 
 1685         cdm = (struct ccb_dev_match *)arg;
 1686         bus = device->target->bus;
 1687 
 1688         /*
 1689          * If our position is for something deeper in the tree, that means
 1690          * that we've already seen this node.  So, we keep going down.
 1691          */
 1692         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1693          && (cdm->pos.cookie.device == device)
 1694          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1695          && (cdm->pos.cookie.periph != NULL))
 1696                 retval = DM_RET_DESCEND;
 1697         else
 1698                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 1699                                         device);
 1700 
 1701         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1702                 cdm->status = CAM_DEV_MATCH_ERROR;
 1703                 return(0);
 1704         }
 1705 
 1706         /*
 1707          * If the copy flag is set, copy this device out.
 1708          */
 1709         if (retval & DM_RET_COPY) {
 1710                 int spaceleft, j;
 1711 
 1712                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1713                         sizeof(struct dev_match_result));
 1714 
 1715                 /*
 1716                  * If we don't have enough space to put in another
 1717                  * match result, save our position and tell the
 1718                  * user there are more devices to check.
 1719                  */
 1720                 if (spaceleft < sizeof(struct dev_match_result)) {
 1721                         bzero(&cdm->pos, sizeof(cdm->pos));
 1722                         cdm->pos.position_type =
 1723                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1724                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 1725 
 1726                         cdm->pos.cookie.bus = device->target->bus;
  1727                         cdm->pos.generations[CAM_BUS_GENERATION] =
 1728                                 xsoftc.bus_generation;
 1729                         cdm->pos.cookie.target = device->target;
 1730                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1731                                 device->target->bus->generation;
 1732                         cdm->pos.cookie.device = device;
 1733                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1734                                 device->target->generation;
 1735                         cdm->status = CAM_DEV_MATCH_MORE;
 1736                         return(0);
 1737                 }
 1738                 j = cdm->num_matches;
 1739                 cdm->num_matches++;
 1740                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 1741                 cdm->matches[j].result.device_result.path_id =
 1742                         device->target->bus->path_id;
 1743                 cdm->matches[j].result.device_result.target_id =
 1744                         device->target->target_id;
 1745                 cdm->matches[j].result.device_result.target_lun =
 1746                         device->lun_id;
 1747                 cdm->matches[j].result.device_result.protocol =
 1748                         device->protocol;
 1749                 bcopy(&device->inq_data,
 1750                       &cdm->matches[j].result.device_result.inq_data,
 1751                       sizeof(struct scsi_inquiry_data));
 1752                 bcopy(&device->ident_data,
 1753                       &cdm->matches[j].result.device_result.ident_data,
 1754                       sizeof(struct ata_params));
 1755 
 1756                 /* Let the user know whether this device is unconfigured */
 1757                 if (device->flags & CAM_DEV_UNCONFIGURED)
 1758                         cdm->matches[j].result.device_result.flags =
 1759                                 DEV_RESULT_UNCONFIGURED;
 1760                 else
 1761                         cdm->matches[j].result.device_result.flags =
 1762                                 DEV_RESULT_NOFLAG;
 1763         }
 1764 
 1765         /*
 1766          * If the user isn't interested in peripherals, don't descend
 1767          * the tree any further.
 1768          */
 1769         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1770                 return(1);
 1771 
 1772         /*
 1773          * If there is a peripheral list generation recorded, make sure
 1774          * it hasn't changed.
 1775          */
 1776         xpt_lock_buses();
 1777         mtx_lock(&bus->eb_mtx);
 1778         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1779          && (cdm->pos.cookie.bus == bus)
 1780          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1781          && (cdm->pos.cookie.target == device->target)
 1782          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1783          && (cdm->pos.cookie.device == device)
 1784          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1785          && (cdm->pos.cookie.periph != NULL)) {
 1786                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1787                     device->generation) {
 1788                         mtx_unlock(&bus->eb_mtx);
 1789                         xpt_unlock_buses();
 1790                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1791                         return(0);
 1792                 }
 1793                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
 1794                 periph->refcount++;
 1795         } else
 1796                 periph = NULL;
 1797         mtx_unlock(&bus->eb_mtx);
 1798         xpt_unlock_buses();
 1799 
 1800         return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
 1801 }
 1802 
 1803 static int
 1804 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 1805 {
 1806         struct ccb_dev_match *cdm;
 1807         dev_match_ret retval;
 1808 
 1809         cdm = (struct ccb_dev_match *)arg;
 1810 
 1811         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1812 
 1813         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1814                 cdm->status = CAM_DEV_MATCH_ERROR;
 1815                 return(0);
 1816         }
 1817 
 1818         /*
 1819          * If the copy flag is set, copy this peripheral out.
 1820          */
 1821         if (retval & DM_RET_COPY) {
 1822                 int spaceleft, j;
 1823 
 1824                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1825                         sizeof(struct dev_match_result));
 1826 
 1827                 /*
 1828                  * If we don't have enough space to put in another
 1829                  * match result, save our position and tell the
 1830                  * user there are more devices to check.
 1831                  */
 1832                 if (spaceleft < sizeof(struct dev_match_result)) {
 1833                         bzero(&cdm->pos, sizeof(cdm->pos));
 1834                         cdm->pos.position_type =
 1835                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1836                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 1837                                 CAM_DEV_POS_PERIPH;
 1838 
 1839                         cdm->pos.cookie.bus = periph->path->bus;
  1840                         cdm->pos.generations[CAM_BUS_GENERATION] =
 1841                                 xsoftc.bus_generation;
 1842                         cdm->pos.cookie.target = periph->path->target;
 1843                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1844                                 periph->path->bus->generation;
 1845                         cdm->pos.cookie.device = periph->path->device;
 1846                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1847                                 periph->path->target->generation;
 1848                         cdm->pos.cookie.periph = periph;
 1849                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1850                                 periph->path->device->generation;
 1851                         cdm->status = CAM_DEV_MATCH_MORE;
 1852                         return(0);
 1853                 }
 1854 
 1855                 j = cdm->num_matches;
 1856                 cdm->num_matches++;
 1857                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1858                 cdm->matches[j].result.periph_result.path_id =
 1859                         periph->path->bus->path_id;
 1860                 cdm->matches[j].result.periph_result.target_id =
 1861                         periph->path->target->target_id;
 1862                 cdm->matches[j].result.periph_result.target_lun =
 1863                         periph->path->device->lun_id;
 1864                 cdm->matches[j].result.periph_result.unit_number =
 1865                         periph->unit_number;
 1866                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1867                         periph->periph_name, DEV_IDLEN);
 1868         }
 1869 
 1870         return(1);
 1871 }
 1872 
 1873 static int
 1874 xptedtmatch(struct ccb_dev_match *cdm)
 1875 {
 1876         struct cam_eb *bus;
 1877         int ret;
 1878 
 1879         cdm->num_matches = 0;
 1880 
 1881         /*
 1882          * Check the bus list generation.  If it has changed, the user
 1883          * needs to reset everything and start over.
 1884          */
 1885         xpt_lock_buses();
 1886         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1887          && (cdm->pos.cookie.bus != NULL)) {
 1888                 if (cdm->pos.generations[CAM_BUS_GENERATION] !=
 1889                     xsoftc.bus_generation) {
 1890                         xpt_unlock_buses();
 1891                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1892                         return(0);
 1893                 }
 1894                 bus = (struct cam_eb *)cdm->pos.cookie.bus;
 1895                 bus->refcount++;
 1896         } else
 1897                 bus = NULL;
 1898         xpt_unlock_buses();
 1899 
 1900         ret = xptbustraverse(bus, xptedtbusfunc, cdm);
 1901 
 1902         /*
 1903          * If we get back 0, that means that we had to stop before fully
 1904          * traversing the EDT.  It also means that one of the subroutines
 1905          * has set the status field to the proper value.  If we get back 1,
 1906          * we've fully traversed the EDT and copied out any matching entries.
 1907          */
 1908         if (ret == 1)
 1909                 cdm->status = CAM_DEV_MATCH_LAST;
 1910 
 1911         return(ret);
 1912 }
 1913 
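       /*
        * The CAM_DEV_MATCH_MORE/position machinery above exists so that a
        * bounded user buffer can page through the EDT: userland reissues
        * the same CCB, leaving cdm.pos intact, until the status is no
        * longer "more".  A rough userland sketch, modeled on camcontrol(8)'s
        * getdevtree(); error handling and result decoding are elided, and
        * the 100-result buffer size is arbitrary:
        */
       #if 0	/* illustrative only, userland */
       static void
       example_getdevtree(void)
       {
               union ccb ccb;
               int fd;

               if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
                       return;
               bzero(&ccb, sizeof(ccb));
               ccb.ccb_h.func_code = XPT_DEV_MATCH;
               ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
               ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
               ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
               ccb.cdm.match_buf_len = 100 * sizeof(struct dev_match_result);
               ccb.cdm.matches = malloc(ccb.cdm.match_buf_len);
               ccb.cdm.num_patterns = 0;       /* no patterns: match all */
               do {
                       if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
                               break;
                       /* ... consume ccb.cdm.num_matches entries ... */
               } while (ccb.ccb_h.status == CAM_REQ_CMP &&
                   ccb.cdm.status == CAM_DEV_MATCH_MORE);
               free(ccb.cdm.matches);
               close(fd);
       }
       #endif
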
 1914 static int
 1915 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 1916 {
 1917         struct cam_periph *periph;
 1918         struct ccb_dev_match *cdm;
 1919 
 1920         cdm = (struct ccb_dev_match *)arg;
 1921 
 1922         xpt_lock_buses();
 1923         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1924          && (cdm->pos.cookie.pdrv == pdrv)
 1925          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1926          && (cdm->pos.cookie.periph != NULL)) {
 1927                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1928                     (*pdrv)->generation) {
 1929                         xpt_unlock_buses();
 1930                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1931                         return(0);
 1932                 }
 1933                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
 1934                 periph->refcount++;
 1935         } else
 1936                 periph = NULL;
 1937         xpt_unlock_buses();
 1938 
 1939         return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
 1940 }
 1941 
 1942 static int
 1943 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 1944 {
 1945         struct ccb_dev_match *cdm;
 1946         dev_match_ret retval;
 1947 
 1948         cdm = (struct ccb_dev_match *)arg;
 1949 
 1950         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1951 
 1952         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1953                 cdm->status = CAM_DEV_MATCH_ERROR;
 1954                 return(0);
 1955         }
 1956 
 1957         /*
 1958          * If the copy flag is set, copy this peripheral out.
 1959          */
 1960         if (retval & DM_RET_COPY) {
 1961                 int spaceleft, j;
 1962 
 1963                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1964                         sizeof(struct dev_match_result));
 1965 
 1966                 /*
 1967                  * If we don't have enough space to put in another
 1968                  * match result, save our position and tell the
 1969                  * user there are more devices to check.
 1970                  */
 1971                 if (spaceleft < sizeof(struct dev_match_result)) {
 1972                         struct periph_driver **pdrv;
 1973 
 1974                         pdrv = NULL;
 1975                         bzero(&cdm->pos, sizeof(cdm->pos));
 1976                         cdm->pos.position_type =
 1977                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 1978                                 CAM_DEV_POS_PERIPH;
 1979 
 1980                         /*
  1981                          * This may look a bit nonsensical, but it is
 1982                          * actually quite logical.  There are very few
 1983                          * peripheral drivers, and bloating every peripheral
 1984                          * structure with a pointer back to its parent
 1985                          * peripheral driver linker set entry would cost
 1986                          * more in the long run than doing this quick lookup.
 1987                          */
 1988                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 1989                                 if (strcmp((*pdrv)->driver_name,
 1990                                     periph->periph_name) == 0)
 1991                                         break;
 1992                         }
 1993 
 1994                         if (*pdrv == NULL) {
 1995                                 cdm->status = CAM_DEV_MATCH_ERROR;
 1996                                 return(0);
 1997                         }
 1998 
 1999                         cdm->pos.cookie.pdrv = pdrv;
 2000                         /*
 2001                          * The periph generation slot does double duty, as
 2002                          * does the periph pointer slot.  They are used for
 2003                          * both edt and pdrv lookups and positioning.
 2004                          */
 2005                         cdm->pos.cookie.periph = periph;
 2006                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2007                                 (*pdrv)->generation;
 2008                         cdm->status = CAM_DEV_MATCH_MORE;
 2009                         return(0);
 2010                 }
 2011 
 2012                 j = cdm->num_matches;
 2013                 cdm->num_matches++;
 2014                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2015                 cdm->matches[j].result.periph_result.path_id =
 2016                         periph->path->bus->path_id;
 2017 
 2018                 /*
 2019                  * The transport layer peripheral doesn't have a target or
 2020                  * lun.
 2021                  */
 2022                 if (periph->path->target)
 2023                         cdm->matches[j].result.periph_result.target_id =
 2024                                 periph->path->target->target_id;
 2025                 else
 2026                         cdm->matches[j].result.periph_result.target_id =
 2027                                 CAM_TARGET_WILDCARD;
 2028 
 2029                 if (periph->path->device)
 2030                         cdm->matches[j].result.periph_result.target_lun =
 2031                                 periph->path->device->lun_id;
 2032                 else
 2033                         cdm->matches[j].result.periph_result.target_lun =
 2034                                 CAM_LUN_WILDCARD;
 2035 
 2036                 cdm->matches[j].result.periph_result.unit_number =
 2037                         periph->unit_number;
 2038                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2039                         periph->periph_name, DEV_IDLEN);
 2040         }
 2041 
 2042         return(1);
 2043 }
 2044 
 2045 static int
 2046 xptperiphlistmatch(struct ccb_dev_match *cdm)
 2047 {
 2048         int ret;
 2049 
 2050         cdm->num_matches = 0;
 2051 
 2052         /*
 2053          * At this point in the edt traversal function, we check the bus
 2054          * list generation to make sure that no busses have been added or
  2055          * removed since the user last sent an XPT_DEV_MATCH ccb through.
 2056          * For the peripheral driver list traversal function, however, we
 2057          * don't have to worry about new peripheral driver types coming or
 2058          * going; they're in a linker set, and therefore can't change
 2059          * without a recompile.
 2060          */
 2061 
 2062         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2063          && (cdm->pos.cookie.pdrv != NULL))
 2064                 ret = xptpdrvtraverse(
 2065                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2066                                 xptplistpdrvfunc, cdm);
 2067         else
 2068                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2069 
 2070         /*
 2071          * If we get back 0, that means that we had to stop before fully
 2072          * traversing the peripheral driver tree.  It also means that one of
 2073          * the subroutines has set the status field to the proper value.  If
  2074          * we get back 1, we've fully traversed the peripheral driver
  2075          * list and copied out any matching entries.
 2076          */
 2077         if (ret == 1)
 2078                 cdm->status = CAM_DEV_MATCH_LAST;
 2079 
 2080         return(ret);
 2081 }
 2082 
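       /*
        * The traversal helpers below share one reference-count handoff
        * discipline: visit the current node via tr_func() with no list lock
        * held, then, under the list lock, take a reference on the next node
        * before releasing the one just visited.  Both nodes thus stay alive
        * across the unlocked callback without holding a mutex over tr_func().
        */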
 2083 static int
 2084 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2085 {
 2086         struct cam_eb *bus, *next_bus;
 2087         int retval;
 2088 
 2089         retval = 1;
 2090         if (start_bus)
 2091                 bus = start_bus;
 2092         else {
 2093                 xpt_lock_buses();
 2094                 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 2095                 if (bus == NULL) {
 2096                         xpt_unlock_buses();
 2097                         return (retval);
 2098                 }
 2099                 bus->refcount++;
 2100                 xpt_unlock_buses();
 2101         }
 2102         for (; bus != NULL; bus = next_bus) {
 2103                 retval = tr_func(bus, arg);
 2104                 if (retval == 0) {
 2105                         xpt_release_bus(bus);
 2106                         break;
 2107                 }
 2108                 xpt_lock_buses();
 2109                 next_bus = TAILQ_NEXT(bus, links);
 2110                 if (next_bus)
 2111                         next_bus->refcount++;
 2112                 xpt_unlock_buses();
 2113                 xpt_release_bus(bus);
 2114         }
 2115         return(retval);
 2116 }
 2117 
 2118 static int
 2119 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2120                   xpt_targetfunc_t *tr_func, void *arg)
 2121 {
 2122         struct cam_et *target, *next_target;
 2123         int retval;
 2124 
 2125         retval = 1;
 2126         if (start_target)
 2127                 target = start_target;
 2128         else {
 2129                 mtx_lock(&bus->eb_mtx);
 2130                 target = TAILQ_FIRST(&bus->et_entries);
 2131                 if (target == NULL) {
 2132                         mtx_unlock(&bus->eb_mtx);
 2133                         return (retval);
 2134                 }
 2135                 target->refcount++;
 2136                 mtx_unlock(&bus->eb_mtx);
 2137         }
 2138         for (; target != NULL; target = next_target) {
 2139                 retval = tr_func(target, arg);
 2140                 if (retval == 0) {
 2141                         xpt_release_target(target);
 2142                         break;
 2143                 }
 2144                 mtx_lock(&bus->eb_mtx);
 2145                 next_target = TAILQ_NEXT(target, links);
 2146                 if (next_target)
 2147                         next_target->refcount++;
 2148                 mtx_unlock(&bus->eb_mtx);
 2149                 xpt_release_target(target);
 2150         }
 2151         return(retval);
 2152 }
 2153 
 2154 static int
 2155 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2156                   xpt_devicefunc_t *tr_func, void *arg)
 2157 {
 2158         struct cam_eb *bus;
 2159         struct cam_ed *device, *next_device;
 2160         int retval;
 2161 
 2162         retval = 1;
 2163         bus = target->bus;
 2164         if (start_device)
 2165                 device = start_device;
 2166         else {
 2167                 mtx_lock(&bus->eb_mtx);
 2168                 device = TAILQ_FIRST(&target->ed_entries);
 2169                 if (device == NULL) {
 2170                         mtx_unlock(&bus->eb_mtx);
 2171                         return (retval);
 2172                 }
 2173                 device->refcount++;
 2174                 mtx_unlock(&bus->eb_mtx);
 2175         }
 2176         for (; device != NULL; device = next_device) {
 2177                 mtx_lock(&device->device_mtx);
 2178                 retval = tr_func(device, arg);
 2179                 mtx_unlock(&device->device_mtx);
 2180                 if (retval == 0) {
 2181                         xpt_release_device(device);
 2182                         break;
 2183                 }
 2184                 mtx_lock(&bus->eb_mtx);
 2185                 next_device = TAILQ_NEXT(device, links);
 2186                 if (next_device)
 2187                         next_device->refcount++;
 2188                 mtx_unlock(&bus->eb_mtx);
 2189                 xpt_release_device(device);
 2190         }
 2191         return(retval);
 2192 }
 2193 
 2194 static int
 2195 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2196                   xpt_periphfunc_t *tr_func, void *arg)
 2197 {
 2198         struct cam_eb *bus;
 2199         struct cam_periph *periph, *next_periph;
 2200         int retval;
 2201 
 2202         retval = 1;
 2203 
 2204         bus = device->target->bus;
 2205         if (start_periph)
 2206                 periph = start_periph;
 2207         else {
 2208                 xpt_lock_buses();
 2209                 mtx_lock(&bus->eb_mtx);
 2210                 periph = SLIST_FIRST(&device->periphs);
 2211                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
 2212                         periph = SLIST_NEXT(periph, periph_links);
 2213                 if (periph == NULL) {
 2214                         mtx_unlock(&bus->eb_mtx);
 2215                         xpt_unlock_buses();
 2216                         return (retval);
 2217                 }
 2218                 periph->refcount++;
 2219                 mtx_unlock(&bus->eb_mtx);
 2220                 xpt_unlock_buses();
 2221         }
 2222         for (; periph != NULL; periph = next_periph) {
 2223                 retval = tr_func(periph, arg);
 2224                 if (retval == 0) {
 2225                         cam_periph_release_locked(periph);
 2226                         break;
 2227                 }
 2228                 xpt_lock_buses();
 2229                 mtx_lock(&bus->eb_mtx);
 2230                 next_periph = SLIST_NEXT(periph, periph_links);
 2231                 while (next_periph != NULL &&
 2232                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
 2233                         next_periph = SLIST_NEXT(next_periph, periph_links);
 2234                 if (next_periph)
 2235                         next_periph->refcount++;
 2236                 mtx_unlock(&bus->eb_mtx);
 2237                 xpt_unlock_buses();
 2238                 cam_periph_release_locked(periph);
 2239         }
 2240         return(retval);
 2241 }
 2242 
 2243 static int
 2244 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2245                 xpt_pdrvfunc_t *tr_func, void *arg)
 2246 {
 2247         struct periph_driver **pdrv;
 2248         int retval;
 2249 
 2250         retval = 1;
 2251 
 2252         /*
 2253          * We don't traverse the peripheral driver list like we do the
 2254          * other lists, because it is a linker set, and therefore cannot be
 2255          * changed during runtime.  If the peripheral driver list is ever
 2256          * re-done to be something other than a linker set (i.e. it can
 2257          * change while the system is running), the list traversal should
 2258          * be modified to work like the other traversal functions.
 2259          */
 2260         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2261              *pdrv != NULL; pdrv++) {
 2262                 retval = tr_func(pdrv, arg);
 2263 
 2264                 if (retval == 0)
 2265                         return(retval);
 2266         }
 2267 
 2268         return(retval);
 2269 }
 2270 
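       /*
        * Peripheral drivers enter the periph_drivers set through the
        * PERIPHDRIVER_DECLARE() macro from cam_periph.h.  A registration
        * sketch (the "foo" driver and fooinit() are hypothetical; the shape
        * follows real drivers such as da(4)):
        */
       #if 0	/* illustrative only */
       static periph_init_t fooinit;

       static struct periph_driver foodriver = {
               fooinit, "foo",
               TAILQ_HEAD_INITIALIZER(foodriver.units), /* generation */ 0
       };
       PERIPHDRIVER_DECLARE(foo, foodriver);
       #endif
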
 2271 static int
 2272 xptpdperiphtraverse(struct periph_driver **pdrv,
 2273                     struct cam_periph *start_periph,
 2274                     xpt_periphfunc_t *tr_func, void *arg)
 2275 {
 2276         struct cam_periph *periph, *next_periph;
 2277         int retval;
 2278 
 2279         retval = 1;
 2280 
 2281         if (start_periph)
 2282                 periph = start_periph;
 2283         else {
 2284                 xpt_lock_buses();
 2285                 periph = TAILQ_FIRST(&(*pdrv)->units);
 2286                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
 2287                         periph = TAILQ_NEXT(periph, unit_links);
 2288                 if (periph == NULL) {
 2289                         xpt_unlock_buses();
 2290                         return (retval);
 2291                 }
 2292                 periph->refcount++;
 2293                 xpt_unlock_buses();
 2294         }
 2295         for (; periph != NULL; periph = next_periph) {
 2296                 cam_periph_lock(periph);
 2297                 retval = tr_func(periph, arg);
 2298                 cam_periph_unlock(periph);
 2299                 if (retval == 0) {
 2300                         cam_periph_release(periph);
 2301                         break;
 2302                 }
 2303                 xpt_lock_buses();
 2304                 next_periph = TAILQ_NEXT(periph, unit_links);
 2305                 while (next_periph != NULL &&
 2306                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
 2307                         next_periph = TAILQ_NEXT(next_periph, unit_links);
 2308                 if (next_periph)
 2309                         next_periph->refcount++;
 2310                 xpt_unlock_buses();
 2311                 cam_periph_release(periph);
 2312         }
 2313         return(retval);
 2314 }
 2315 
 2316 static int
 2317 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2318 {
 2319         struct xpt_traverse_config *tr_config;
 2320 
 2321         tr_config = (struct xpt_traverse_config *)arg;
 2322 
 2323         if (tr_config->depth == XPT_DEPTH_BUS) {
 2324                 xpt_busfunc_t *tr_func;
 2325 
 2326                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2327 
 2328                 return(tr_func(bus, tr_config->tr_arg));
 2329         } else
 2330                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2331 }
 2332 
 2333 static int
 2334 xptdeftargetfunc(struct cam_et *target, void *arg)
 2335 {
 2336         struct xpt_traverse_config *tr_config;
 2337 
 2338         tr_config = (struct xpt_traverse_config *)arg;
 2339 
 2340         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2341                 xpt_targetfunc_t *tr_func;
 2342 
 2343                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2344 
 2345                 return(tr_func(target, tr_config->tr_arg));
 2346         } else
 2347                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2348 }
 2349 
 2350 static int
 2351 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2352 {
 2353         struct xpt_traverse_config *tr_config;
 2354 
 2355         tr_config = (struct xpt_traverse_config *)arg;
 2356 
 2357         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2358                 xpt_devicefunc_t *tr_func;
 2359 
 2360                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2361 
 2362                 return(tr_func(device, tr_config->tr_arg));
 2363         } else
 2364                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2365 }
 2366 
 2367 static int
 2368 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2369 {
 2370         struct xpt_traverse_config *tr_config;
 2371         xpt_periphfunc_t *tr_func;
 2372 
 2373         tr_config = (struct xpt_traverse_config *)arg;
 2374 
 2375         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2376 
 2377         /*
 2378          * Unlike the other default functions, we don't check for depth
 2379          * here.  The peripheral driver level is the last level in the EDT,
 2380          * so if we're here, we should execute the function in question.
 2381          */
 2382         return(tr_func(periph, tr_config->tr_arg));
 2383 }
 2384 
 2385 /*
 2386  * Execute the given function for every bus in the EDT.
 2387  */
 2388 static int
 2389 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2390 {
 2391         struct xpt_traverse_config tr_config;
 2392 
 2393         tr_config.depth = XPT_DEPTH_BUS;
 2394         tr_config.tr_func = tr_func;
 2395         tr_config.tr_arg = arg;
 2396 
 2397         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2398 }
 2399 
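       /*
        * A minimal (hypothetical) use of the wrapper above: count the buses
        * in the EDT with a callback that returns nonzero to keep traversing.
        */
       #if 0	/* illustrative only */
       static int
       example_count_bus(struct cam_eb *bus, void *arg)
       {

               (*(int *)arg)++;
               return (1);             /* nonzero: continue the traversal */
       }

       static int
       example_num_busses(void)
       {
               int count = 0;

               xpt_for_all_busses(example_count_bus, &count);
               return (count);
       }
       #endif
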
 2400 /*
 2401  * Execute the given function for every device in the EDT.
 2402  */
 2403 static int
 2404 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2405 {
 2406         struct xpt_traverse_config tr_config;
 2407 
 2408         tr_config.depth = XPT_DEPTH_DEVICE;
 2409         tr_config.tr_func = tr_func;
 2410         tr_config.tr_arg = arg;
 2411 
 2412         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2413 }
 2414 
 2415 static int
 2416 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2417 {
 2418         struct cam_path path;
 2419         struct ccb_getdev cgd;
 2420         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2421 
 2422         /*
 2423          * Don't report unconfigured devices (Wildcard devs,
 2424          * devices only for target mode, device instances
 2425          * that have been invalidated but are waiting for
 2426          * their last reference count to be released).
 2427          */
 2428         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2429                 return (1);
 2430 
 2431         xpt_compile_path(&path,
 2432                          NULL,
 2433                          device->target->bus->path_id,
 2434                          device->target->target_id,
 2435                          device->lun_id);
 2436         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2437         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2438         xpt_action((union ccb *)&cgd);
 2439         csa->callback(csa->callback_arg,
 2440                             AC_FOUND_DEVICE,
 2441                             &path, &cgd);
 2442         xpt_release_path(&path);
 2443 
 2444         return(1);
 2445 }
 2446 
 2447 static int
 2448 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2449 {
 2450         struct cam_path path;
 2451         struct ccb_pathinq cpi;
 2452         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2453 
 2454         xpt_compile_path(&path, /*periph*/NULL,
 2455                          bus->path_id,
 2456                          CAM_TARGET_WILDCARD,
 2457                          CAM_LUN_WILDCARD);
 2458         xpt_path_lock(&path);
 2459         xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2460         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2461         xpt_action((union ccb *)&cpi);
 2462         csa->callback(csa->callback_arg,
 2463                             AC_PATH_REGISTERED,
 2464                             &path, &cpi);
 2465         xpt_path_unlock(&path);
 2466         xpt_release_path(&path);
 2467 
 2468         return(1);
 2469 }
 2470 
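       /*
        * The two helpers above replay AC_FOUND_DEVICE and AC_PATH_REGISTERED
        * events when a new async callback is registered, so a consumer also
        * learns about devices and paths that predate its registration.  A
        * registration sketch (the callback and function names are
        * hypothetical):
        */
       #if 0	/* illustrative only */
       static void
       example_async_cb(void *cbarg, u_int32_t code, struct cam_path *path,
           void *arg)
       {

               if (code == AC_FOUND_DEVICE) {
                       /* arg points at the struct ccb_getdev filled in above */
               }
       }

       static void
       example_register(void)
       {

               /* NULL path: receive events for every device in the system. */
               xpt_register_async(AC_FOUND_DEVICE, example_async_cb, NULL, NULL);
       }
       #endif
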
 2471 void
 2472 xpt_action(union ccb *start_ccb)
 2473 {
 2474 
 2475         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2476 
 2477         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2478         (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
 2479 }
 2480 
 2481 void
 2482 xpt_action_default(union ccb *start_ccb)
 2483 {
 2484         struct cam_path *path;
 2485         struct cam_sim *sim;
 2486         struct mtx *mtx;
 2487 
 2488         path = start_ccb->ccb_h.path;
 2489         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
 2490 
 2491         switch (start_ccb->ccb_h.func_code) {
 2492         case XPT_SCSI_IO:
 2493         {
 2494                 struct cam_ed *device;
 2495 
 2496                 /*
 2497                  * For the sake of compatibility with SCSI-1
 2498                  * devices that may not understand the identify
 2499                  * message, we include lun information in the
 2500                  * second byte of all commands.  SCSI-1 specifies
 2501                  * that luns are a 3 bit value and reserves only 3
 2502                  * bits for lun information in the CDB.  Later
 2503                  * revisions of the SCSI spec allow for more than 8
 2504                  * luns, but have deprecated lun information in the
  2505                  * CDB.  So, if the lun won't fit, we must omit it.
 2506                  *
 2507                  * Also be aware that during initial probing for devices,
 2508                  * the inquiry information is unknown but initialized to 0.
 2509                  * This means that this code will be exercised while probing
 2510                  * devices with an ANSI revision greater than 2.
 2511                  */
 2512                 device = path->device;
 2513                 if (device->protocol_version <= SCSI_REV_2
 2514                  && start_ccb->ccb_h.target_lun < 8
 2515                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 2516 
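                               /*
                                * e.g. for LUN 2 this ORs (2 << 5) == 0x40 into
                                * CDB byte 1, placing the LUN in bits 5-7 as
                                * SCSI-1 expects.
                                */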
 2517                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 2518                             start_ccb->ccb_h.target_lun << 5;
 2519                 }
 2520                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 2521         }
 2522         /* FALLTHROUGH */
 2523         case XPT_TARGET_IO:
 2524         case XPT_CONT_TARGET_IO:
 2525                 start_ccb->csio.sense_resid = 0;
 2526                 start_ccb->csio.resid = 0;
 2527                 /* FALLTHROUGH */
 2528         case XPT_ATA_IO:
 2529                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
 2530                         start_ccb->ataio.resid = 0;
 2531                 /* FALLTHROUGH */
 2532         case XPT_RESET_DEV:
 2533         case XPT_ENG_EXEC:
 2534         case XPT_SMP_IO:
 2535         {
 2536                 struct cam_devq *devq;
 2537 
 2538                 devq = path->bus->sim->devq;
 2539                 mtx_lock(&devq->send_mtx);
 2540                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 2541                 if (xpt_schedule_devq(devq, path->device) != 0)
 2542                         xpt_run_devq(devq);
 2543                 mtx_unlock(&devq->send_mtx);
 2544                 break;
 2545         }
 2546         case XPT_CALC_GEOMETRY:
 2547                 /* Filter out garbage */
 2548                 if (start_ccb->ccg.block_size == 0
 2549                  || start_ccb->ccg.volume_size == 0) {
 2550                         start_ccb->ccg.cylinders = 0;
 2551                         start_ccb->ccg.heads = 0;
 2552                         start_ccb->ccg.secs_per_track = 0;
 2553                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2554                         break;
 2555                 }
 2556 #if defined(PC98) || defined(__sparc64__)
 2557                 /*
 2558                  * In a PC-98 system, geometry translation depends on
 2559                  * the "real" device geometry obtained from mode page 4.
 2560                  * SCSI geometry translation is performed in the
 2561                  * initialization routine of the SCSI BIOS and the result
 2562                  * stored in host memory.  If the translation is available
 2563                  * in host memory, use it.  If not, rely on the default
 2564                  * translation the device driver performs.
 2565                  * For sparc64, we may need to adjust the geometry of large
 2566                  * disks in order to fit the limitations of the 16-bit
 2567                  * fields of the VTOC8 disk label.
 2568                  */
 2569                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 2570                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2571                         break;
 2572                 }
 2573 #endif
 2574                 goto call_sim;
 2575         case XPT_ABORT:
 2576         {
 2577                 union ccb *abort_ccb;
 2578 
 2579                 abort_ccb = start_ccb->cab.abort_ccb;
 2580                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 2581 
 2582                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 2583                                 struct cam_ccbq *ccbq;
 2584                                 struct cam_ed *device;
 2585 
 2586                                 device = abort_ccb->ccb_h.path->device;
 2587                                 ccbq = &device->ccbq;
 2588                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
 2589                                 abort_ccb->ccb_h.status =
 2590                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2591                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2592                                 xpt_done(abort_ccb);
 2593                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2594                                 break;
 2595                         }
 2596                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 2597                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 2598                                 /*
 2599                                  * We've caught this ccb en route to
 2600                                  * the SIM.  Flag it for abort and the
 2601                                  * SIM will do so just before starting
 2602                                  * real work on the CCB.
 2603                                  */
 2604                                 abort_ccb->ccb_h.status =
 2605                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2606                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2607                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2608                                 break;
 2609                         }
 2610                 }
 2611                 if (XPT_FC_IS_QUEUED(abort_ccb)
 2612                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 2613                         /*
 2614                          * It's already completed but waiting
 2615                          * for our SWI to get to it.
 2616                          */
 2617                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 2618                         break;
 2619                 }
 2620                 /*
 2621                  * If we weren't able to take care of the abort request
 2622                  * in the XPT, pass the request down to the SIM for processing.
 2623                  */
 2624         }
 2625         /* FALLTHROUGH */
 2626         case XPT_ACCEPT_TARGET_IO:
 2627         case XPT_EN_LUN:
 2628         case XPT_IMMED_NOTIFY:
 2629         case XPT_NOTIFY_ACK:
 2630         case XPT_RESET_BUS:
 2631         case XPT_IMMEDIATE_NOTIFY:
 2632         case XPT_NOTIFY_ACKNOWLEDGE:
 2633         case XPT_GET_SIM_KNOB:
 2634         case XPT_SET_SIM_KNOB:
 2635         case XPT_GET_TRAN_SETTINGS:
 2636         case XPT_SET_TRAN_SETTINGS:
 2637         case XPT_PATH_INQ:
 2638 call_sim:
 2639                 sim = path->bus->sim;
 2640                 mtx = sim->mtx;
 2641                 if (mtx && !mtx_owned(mtx))
 2642                         mtx_lock(mtx);
 2643                 else
 2644                         mtx = NULL;
 2645                 (*(sim->sim_action))(sim, start_ccb);
 2646                 if (mtx)
 2647                         mtx_unlock(mtx);
 2648                 break;
 2649         case XPT_PATH_STATS:
 2650                 start_ccb->cpis.last_reset = path->bus->last_reset;
 2651                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2652                 break;
 2653         case XPT_GDEV_TYPE:
 2654         {
 2655                 struct cam_ed *dev;
 2656 
 2657                 dev = path->device;
 2658                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2659                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2660                 } else {
 2661                         struct ccb_getdev *cgd;
 2662 
 2663                         cgd = &start_ccb->cgd;
 2664                         cgd->protocol = dev->protocol;
 2665                         cgd->inq_data = dev->inq_data;
 2666                         cgd->ident_data = dev->ident_data;
 2667                         cgd->inq_flags = dev->inq_flags;
 2668                         cgd->ccb_h.status = CAM_REQ_CMP;
 2669                         cgd->serial_num_len = dev->serial_num_len;
 2670                         if ((dev->serial_num_len > 0)
 2671                          && (dev->serial_num != NULL))
 2672                                 bcopy(dev->serial_num, cgd->serial_num,
 2673                                       dev->serial_num_len);
 2674                 }
 2675                 break;
 2676         }
 2677         case XPT_GDEV_STATS:
 2678         {
 2679                 struct ccb_getdevstats *cgds = &start_ccb->cgds;
 2680                 struct cam_ed *dev = path->device;
 2681                 struct cam_eb *bus = path->bus;
 2682                 struct cam_et *tar = path->target;
 2683                 struct cam_devq *devq = bus->sim->devq;
 2684 
 2685                 mtx_lock(&devq->send_mtx);
 2686                 cgds->dev_openings = dev->ccbq.dev_openings;
 2687                 cgds->dev_active = dev->ccbq.dev_active;
 2688                 cgds->allocated = dev->ccbq.allocated;
 2689                 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
 2690                 cgds->held = cgds->allocated - cgds->dev_active - cgds->queued;
 2691                 cgds->last_reset = tar->last_reset;
 2692                 cgds->maxtags = dev->maxtags;
 2693                 cgds->mintags = dev->mintags;
 2694                 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 2695                         cgds->last_reset = bus->last_reset;
 2696                 mtx_unlock(&devq->send_mtx);
 2697                 cgds->ccb_h.status = CAM_REQ_CMP;
 2698                 break;
 2699         }
 2700         case XPT_GDEVLIST:
 2701         {
 2702                 struct cam_periph       *nperiph;
 2703                 struct periph_list      *periph_head;
 2704                 struct ccb_getdevlist   *cgdl;
 2705                 u_int                   i;
 2706                 struct cam_ed           *device;
 2707                 int                     found;
 2708 
 2709 
 2710                 found = 0;
 2711 
 2712                 /*
 2713                  * Don't want anyone mucking with our data.
 2714                  */
 2715                 device = path->device;
 2716                 periph_head = &device->periphs;
 2717                 cgdl = &start_ccb->cgdl;
 2718 
 2719                 /*
 2720                  * Check and see if the list has changed since the user
 2721                  * last requested a list member.  If so, tell them that the
 2722                  * list has changed, and therefore they need to start over
 2723                  * from the beginning.
 2724                  */
 2725                 if ((cgdl->index != 0) &&
 2726                     (cgdl->generation != device->generation)) {
 2727                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 2728                         break;
 2729                 }
 2730 
 2731                 /*
 2732                  * Traverse the list of peripherals and attempt to find
 2733                  * the requested peripheral.
 2734                  */
 2735                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 2736                      (nperiph != NULL) && (i <= cgdl->index);
 2737                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 2738                         if (i == cgdl->index) {
 2739                                 strncpy(cgdl->periph_name,
 2740                                         nperiph->periph_name,
 2741                                         DEV_IDLEN);
 2742                                 cgdl->unit_number = nperiph->unit_number;
 2743                                 found = 1;
 2744                         }
 2745                 }
 2746                 if (found == 0) {
 2747                         cgdl->status = CAM_GDEVLIST_ERROR;
 2748                         break;
 2749                 }
 2750 
 2751                 if (nperiph == NULL)
 2752                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 2753                 else
 2754                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 2755 
 2756                 cgdl->index++;
 2757                 cgdl->generation = device->generation;
 2758 
 2759                 cgdl->ccb_h.status = CAM_REQ_CMP;
 2760                 break;
 2761         }
 2762         case XPT_DEV_MATCH:
 2763         {
 2764                 dev_pos_type position_type;
 2765                 struct ccb_dev_match *cdm;
 2766 
 2767                 cdm = &start_ccb->cdm;
 2768 
 2769                 /*
 2770                  * There are two ways of getting at information in the EDT.
 2771                  * The first way is via the primary EDT tree.  It starts
 2772                  * with a list of busses, then a list of targets on a bus,
 2773                  * then devices/luns on a target, and then peripherals on a
 2774                  * device/lun.  The "other" way is by the peripheral driver
 2775                  * lists.  The peripheral driver lists are organized by
 2776                  * peripheral driver.  (obviously)  So it makes sense to
 2777                  * use the peripheral driver list if the user is looking
 2778                  * for something like "da1", or all "da" devices.  If the
 2779                  * user is looking for something on a particular bus/target
 2780                  * or lun, it's generally better to go through the EDT tree.
 2781                  */
 2782 
 2783                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 2784                         position_type = cdm->pos.position_type;
 2785                 else {
 2786                         u_int i;
 2787 
 2788                         position_type = CAM_DEV_POS_NONE;
 2789 
 2790                         for (i = 0; i < cdm->num_patterns; i++) {
 2791                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 2792                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 2793                                         position_type = CAM_DEV_POS_EDT;
 2794                                         break;
 2795                                 }
 2796                         }
 2797 
 2798                         if (cdm->num_patterns == 0)
 2799                                 position_type = CAM_DEV_POS_EDT;
 2800                         else if (position_type == CAM_DEV_POS_NONE)
 2801                                 position_type = CAM_DEV_POS_PDRV;
 2802                 }
 2803 
 2804                 switch (position_type & CAM_DEV_POS_TYPEMASK) {
 2805                 case CAM_DEV_POS_EDT:
 2806                         xptedtmatch(cdm);
 2807                         break;
 2808                 case CAM_DEV_POS_PDRV:
 2809                         xptperiphlistmatch(cdm);
 2810                         break;
 2811                 default:
 2812                         cdm->status = CAM_DEV_MATCH_ERROR;
 2813                         break;
 2814                 }
 2815 
 2816                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 2817                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2818                 else
 2819                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2820 
 2821                 break;
 2822         }
 2823         case XPT_SASYNC_CB:
 2824         {
 2825                 struct ccb_setasync *csa;
 2826                 struct async_node *cur_entry;
 2827                 struct async_list *async_head;
 2828                 u_int32_t added;
 2829 
 2830                 csa = &start_ccb->csa;
 2831                 added = csa->event_enable;
 2832                 async_head = &path->device->asyncs;
 2833 
 2834                 /*
 2835                  * If there is already an entry for us, simply
 2836                  * update it.
 2837                  */
 2838                 cur_entry = SLIST_FIRST(async_head);
 2839                 while (cur_entry != NULL) {
 2840                         if ((cur_entry->callback_arg == csa->callback_arg)
 2841                          && (cur_entry->callback == csa->callback))
 2842                                 break;
 2843                         cur_entry = SLIST_NEXT(cur_entry, links);
 2844                 }
 2845 
 2846                 if (cur_entry != NULL) {
 2847                         /*
 2848                          * If the request has no flags set,
 2849                          * remove the entry.
 2850                          */
 2851                         added &= ~cur_entry->event_enable;
 2852                         if (csa->event_enable == 0) {
 2853                                 SLIST_REMOVE(async_head, cur_entry,
 2854                                              async_node, links);
 2855                                 xpt_release_device(path->device);
 2856                                 free(cur_entry, M_CAMXPT);
 2857                         } else {
 2858                                 cur_entry->event_enable = csa->event_enable;
 2859                         }
 2860                         csa->event_enable = added;
 2861                 } else {
 2862                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 2863                                            M_NOWAIT);
 2864                         if (cur_entry == NULL) {
 2865                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 2866                                 break;
 2867                         }
 2868                         cur_entry->event_enable = csa->event_enable;
 2869                         cur_entry->event_lock = (path->bus->sim->mtx &&
 2870                             mtx_owned(path->bus->sim->mtx)) ? 1 : 0;
 2871                         cur_entry->callback_arg = csa->callback_arg;
 2872                         cur_entry->callback = csa->callback;
 2873                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 2874                         xpt_acquire_device(path->device);
 2875                 }
 2876                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2877                 break;
 2878         }
 2879         case XPT_REL_SIMQ:
 2880         {
 2881                 struct ccb_relsim *crs;
 2882                 struct cam_ed *dev;
 2883 
 2884                 crs = &start_ccb->crs;
 2885                 dev = path->device;
 2886                 if (dev == NULL) {
 2887 
 2888                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 2889                         break;
 2890                 }
 2891 
 2892                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 2893 
 2894                         /* Don't ever go below one opening */
 2895                         if (crs->openings > 0) {
 2896                                 xpt_dev_ccbq_resize(path, crs->openings);
 2897                                 if (bootverbose) {
 2898                                         xpt_print(path,
 2899                                             "number of openings is now %d\n",
 2900                                             crs->openings);
 2901                                 }
 2902                         }
 2903                 }
 2904 
 2905                 mtx_lock(&dev->sim->devq->send_mtx);
 2906                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 2907 
 2908                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 2909 
 2910                                 /*
 2911                                  * Just extend the old timeout and decrement
 2912                                  * the freeze count so that a single timeout
 2913                                  * is sufficient for releasing the queue.
 2914                                  */
 2915                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2916                                 callout_stop(&dev->callout);
 2917                         } else {
 2918 
 2919                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2920                         }
 2921 
 2922                         callout_reset_sbt(&dev->callout,
 2923                             SBT_1MS * crs->release_timeout, 0,
 2924                             xpt_release_devq_timeout, dev, 0);
 2925 
 2926                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 2927 
 2928                 }
 2929 
 2930                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 2931 
 2932                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 2933                                 /*
 2934                                  * Decrement the freeze count so that a single
 2935                                  * completion is still sufficient to unfreeze
 2936                                  * the queue.
 2937                                  */
 2938                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2939                         } else {
 2940 
 2941                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 2942                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2943                         }
 2944                 }
 2945 
 2946                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 2947 
 2948                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 2949                          || (dev->ccbq.dev_active == 0)) {
 2950 
 2951                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2952                         } else {
 2953 
 2954                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 2955                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2956                         }
 2957                 }
 2958                 mtx_unlock(&dev->sim->devq->send_mtx);
 2959 
 2960                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
 2961                         xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
 2962                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
 2963                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2964                 break;
 2965         }
 2966         case XPT_DEBUG: {
 2967                 struct cam_path *oldpath;
 2968 
 2969                 /* Check that all request bits are supported. */
 2970                 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
 2971                         start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 2972                         break;
 2973                 }
 2974 
 2975                 cam_dflags = CAM_DEBUG_NONE;
 2976                 if (cam_dpath != NULL) {
 2977                         oldpath = cam_dpath;
 2978                         cam_dpath = NULL;
 2979                         xpt_free_path(oldpath);
 2980                 }
 2981                 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
 2982                         if (xpt_create_path(&cam_dpath, NULL,
 2983                                             start_ccb->ccb_h.path_id,
 2984                                             start_ccb->ccb_h.target_id,
 2985                                             start_ccb->ccb_h.target_lun) !=
 2986                                             CAM_REQ_CMP) {
 2987                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2988                         } else {
 2989                                 cam_dflags = start_ccb->cdbg.flags;
 2990                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2991                                 xpt_print(cam_dpath, "debugging flags now %x\n",
 2992                                     cam_dflags);
 2993                         }
 2994                 } else
 2995                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2996                 break;
 2997         }
 2998         case XPT_NOOP:
 2999                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 3000                         xpt_freeze_devq(path, 1);
 3001                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3002                 break;
 3003         case XPT_REPROBE_LUN:
 3004                 xpt_async(AC_INQ_CHANGED, path, NULL);
 3005                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3006                 xpt_done(start_ccb);
 3007                 break;
 3008         default:
 3009         case XPT_SDEV_TYPE:
 3010         case XPT_TERM_IO:
 3011         case XPT_ENG_INQ:
 3012                 /* XXX Implement */
 3013                 printf("%s: CCB type %#x not supported\n", __func__,
 3014                        start_ccb->ccb_h.func_code);
 3015                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 3016                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
 3017                         xpt_done(start_ccb);
 3018                 }
 3019                 break;
 3020         }
 3021 }
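
/*
 * Editorial sketch, not part of the original source: most of the
 * immediate cases above (XPT_GDEV_TYPE, XPT_PATH_STATS, ...) are
 * driven by a caller that fills in a CCB and dispatches it
 * synchronously through xpt_action().  The helper name
 * example_get_device_type is hypothetical.
 */
static cam_status
example_get_device_type(struct cam_path *path, struct ccb_getdev *cgd)
{

        /* The caller is assumed to hold the appropriate CAM lock. */
        xpt_setup_ccb(&cgd->ccb_h, path, CAM_PRIORITY_NORMAL);
        cgd->ccb_h.func_code = XPT_GDEV_TYPE;
        xpt_action((union ccb *)cgd);
        return (cgd->ccb_h.status);
}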
 3022 
 3023 void
 3024 xpt_polled_action(union ccb *start_ccb)
 3025 {
 3026         u_int32_t timeout;
 3027         struct    cam_sim *sim;
 3028         struct    cam_devq *devq;
 3029         struct    cam_ed *dev;
 3030         struct mtx *mtx;
 3031 
 3032         timeout = start_ccb->ccb_h.timeout * 10;
 3033         sim = start_ccb->ccb_h.path->bus->sim;
 3034         devq = sim->devq;
 3035         mtx = sim->mtx;
 3036         dev = start_ccb->ccb_h.path->device;
 3037 
 3038         mtx_unlock(&dev->device_mtx);
 3039 
 3040         /*
 3041          * Steal an opening so that no other queued requests
 3042          * can get it before us while we simulate interrupts.
 3043          */
 3044         mtx_lock(&devq->send_mtx);
 3045         dev->ccbq.dev_openings--;
 3046         while ((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
 3047             (--timeout > 0)) {
 3048                 mtx_unlock(&devq->send_mtx);
 3049                 DELAY(100);
 3050                 if (mtx)
 3051                         mtx_lock(mtx);
 3052                 (*(sim->sim_poll))(sim);
 3053                 if (mtx)
 3054                         mtx_unlock(mtx);
 3055                 camisr_runqueue();
 3056                 mtx_lock(&devq->send_mtx);
 3057         }
 3058         dev->ccbq.dev_openings++;
 3059         mtx_unlock(&devq->send_mtx);
 3060 
 3061         if (timeout != 0) {
 3062                 xpt_action(start_ccb);
 3063                 while (--timeout > 0) {
 3064                         if (mtx)
 3065                                 mtx_lock(mtx);
 3066                         (*(sim->sim_poll))(sim);
 3067                         if (mtx)
 3068                                 mtx_unlock(mtx);
 3069                         camisr_runqueue();
 3070                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 3071                             != CAM_REQ_INPROG)
 3072                                 break;
 3073                         DELAY(100);
 3074                 }
 3075                 if (timeout == 0) {
 3076                         /*
 3077                          * XXX Is it worth adding a sim_timeout entry
 3078                          * point so we can attempt recovery?  If
 3079                          * this is only used for dumps, I don't think
 3080                          * it is.
 3081                          */
 3082                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3083                 }
 3084         } else {
 3085                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3086         }
 3087 
 3088         mtx_lock(&dev->device_mtx);
 3089 }
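
/*
 * Editorial sketch, not part of the original source: crash-dump style
 * callers fill in a CCB while holding the device lock and then poll it
 * to completion with xpt_polled_action(), since interrupt services may
 * not be running.  The helper name example_polled_dispatch is
 * hypothetical.
 */
static u_int32_t
example_polled_dispatch(union ccb *ccb)
{

        /*
         * Assumes the CCB was prepared with xpt_setup_ccb() and that
         * the device lock is held, as xpt_polled_action() requires.
         */
        xpt_polled_action(ccb);
        return (ccb->ccb_h.status & CAM_STATUS_MASK);
}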
 3090 
 3091 /*
 3092  * Schedule a peripheral driver to receive a ccb when its
 3093  * target device has space for more transactions.
 3094  */
 3095 void
 3096 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
 3097 {
 3098 
 3099         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3100         cam_periph_assert(periph, MA_OWNED);
 3101         if (new_priority < periph->scheduled_priority) {
 3102                 periph->scheduled_priority = new_priority;
 3103                 xpt_run_allocq(periph, 0);
 3104         }
 3105 }
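
/*
 * Editorial sketch, not part of the original source: a peripheral
 * driver typically calls xpt_schedule() from its open or strategy
 * routine; once a CCB is available, xpt_run_allocq() calls back into
 * the driver's periph_start() method.  The helper name
 * example_periph_kick is hypothetical.
 */
static void
example_periph_kick(struct cam_periph *periph)
{

        cam_periph_lock(periph);        /* xpt_schedule() asserts this lock. */
        xpt_schedule(periph, CAM_PRIORITY_NORMAL);
        cam_periph_unlock(periph);
}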
 3106 
 3107 
 3108 /*
 3109  * Schedule a device to run on a given queue.
 3110  * If the device was inserted as a new entry on the queue,
 3111  * return 1 meaning the device queue should be run. If we
 3112  * were already queued, implying someone else has already
 3113  * started the queue, return 0 so the caller doesn't attempt
 3114  * to run the queue.
 3115  */
 3116 static int
 3117 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3118                  u_int32_t new_priority)
 3119 {
 3120         int retval;
 3121         u_int32_t old_priority;
 3122 
 3123         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3124 
 3125         old_priority = pinfo->priority;
 3126 
 3127         /*
 3128          * Are we already queued?
 3129          */
 3130         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3131                 /* Simply reorder based on new priority */
 3132                 if (new_priority < old_priority) {
 3133                         camq_change_priority(queue, pinfo->index,
 3134                                              new_priority);
 3135                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3136                                         ("changed priority to %d\n",
 3137                                          new_priority));
 3138                         retval = 1;
 3139                 } else
 3140                         retval = 0;
 3141         } else {
 3142                 /* New entry on the queue */
 3143                 if (new_priority < old_priority)
 3144                         pinfo->priority = new_priority;
 3145 
 3146                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3147                                 ("Inserting onto queue\n"));
 3148                 pinfo->generation = ++queue->generation;
 3149                 camq_insert(queue, pinfo);
 3150                 retval = 1;
 3151         }
 3152         return (retval);
 3153 }
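
/*
 * Editorial sketch, not part of the original source, mirroring the
 * XPT_SCSI_IO queuing path in xpt_action_default() above: only the
 * caller that newly inserted the device on the queue (return value 1)
 * runs the device queue.  The helper name example_queue_and_run is
 * hypothetical.
 */
static void
example_queue_and_run(struct cam_devq *devq, struct cam_ed *device,
    union ccb *ccb)
{

        mtx_lock(&devq->send_mtx);
        cam_ccbq_insert_ccb(&device->ccbq, ccb);
        if (xpt_schedule_devq(devq, device) != 0)
                xpt_run_devq(devq);
        mtx_unlock(&devq->send_mtx);
}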
 3154 
 3155 static void
 3156 xpt_run_allocq_task(void *context, int pending)
 3157 {
 3158         struct cam_periph *periph = context;
 3159 
 3160         cam_periph_lock(periph);
 3161         periph->flags &= ~CAM_PERIPH_RUN_TASK;
 3162         xpt_run_allocq(periph, 1);
 3163         cam_periph_unlock(periph);
 3164         cam_periph_release(periph);
 3165 }
 3166 
 3167 static void
 3168 xpt_run_allocq(struct cam_periph *periph, int sleep)
 3169 {
 3170         struct cam_ed   *device;
 3171         union ccb       *ccb;
 3172         uint32_t         prio;
 3173 
 3174         cam_periph_assert(periph, MA_OWNED);
 3175         if (periph->periph_allocating)
 3176                 return;
 3177         periph->periph_allocating = 1;
 3178         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
 3179         device = periph->path->device;
 3180         ccb = NULL;
 3181 restart:
 3182         while ((prio = min(periph->scheduled_priority,
 3183             periph->immediate_priority)) != CAM_PRIORITY_NONE &&
 3184             (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
 3185              device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
 3186 
 3187                 if (ccb == NULL &&
 3188                     (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
 3189                         if (sleep) {
 3190                                 ccb = xpt_get_ccb(periph);
 3191                                 goto restart;
 3192                         }
 3193                         if (periph->flags & CAM_PERIPH_RUN_TASK)
 3194                                 break;
 3195                         cam_periph_doacquire(periph);
 3196                         periph->flags |= CAM_PERIPH_RUN_TASK;
 3197                         taskqueue_enqueue(xsoftc.xpt_taskq,
 3198                             &periph->periph_run_task);
 3199                         break;
 3200                 }
 3201                 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
 3202                 if (prio == periph->immediate_priority) {
 3203                         periph->immediate_priority = CAM_PRIORITY_NONE;
 3204                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3205                                         ("waking cam_periph_getccb()\n"));
 3206                         SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
 3207                                           periph_links.sle);
 3208                         wakeup(&periph->ccb_list);
 3209                 } else {
 3210                         periph->scheduled_priority = CAM_PRIORITY_NONE;
 3211                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3212                                         ("calling periph_start()\n"));
 3213                         periph->periph_start(periph, ccb);
 3214                 }
 3215                 ccb = NULL;
 3216         }
 3217         if (ccb != NULL)
 3218                 xpt_release_ccb(ccb);
 3219         periph->periph_allocating = 0;
 3220 }
 3221 
 3222 static void
 3223 xpt_run_devq(struct cam_devq *devq)
 3224 {
 3225         char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 3226         struct mtx *mtx;
 3227 
 3228         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
 3229 
 3230         devq->send_queue.qfrozen_cnt++;
 3231         while ((devq->send_queue.entries > 0)
 3232             && (devq->send_openings > 0)
 3233             && (devq->send_queue.qfrozen_cnt <= 1)) {
 3234                 struct  cam_ed *device;
 3235                 union ccb *work_ccb;
 3236                 struct  cam_sim *sim;
 3237 
 3238                 device = (struct cam_ed *)camq_remove(&devq->send_queue,
 3239                                                            CAMQ_HEAD);
 3240                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3241                                 ("running device %p\n", device));
 3242 
 3243                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3244                 if (work_ccb == NULL) {
 3245                         printf("device on run queue with no ccbs???\n");
 3246                         continue;
 3247                 }
 3248 
 3249                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3250 
 3251                         mtx_lock(&xsoftc.xpt_highpower_lock);
 3252                         if (xsoftc.num_highpower <= 0) {
 3253                                 /*
 3254                                  * We got a high power command, but we
 3255                                  * don't have any available slots.  Freeze
 3256                                  * the device queue until we have a slot
 3257                                  * available.
 3258                                  */
 3259                                 xpt_freeze_devq_device(device, 1);
 3260                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
 3261                                                    highpowerq_entry);
 3262 
 3263                                 mtx_unlock(&xsoftc.xpt_highpower_lock);
 3264                                 continue;
 3265                         } else {
 3266                                 /*
 3267                                  * Consume a high power slot while
 3268                                  * this ccb runs.
 3269                                  */
 3270                                 xsoftc.num_highpower--;
 3271                         }
 3272                         mtx_unlock(&xsoftc.xpt_highpower_lock);
 3273                 }
 3274                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3275                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3276                 devq->send_openings--;
 3277                 devq->send_active++;
 3278                 xpt_schedule_devq(devq, device);
 3279                 mtx_unlock(&devq->send_mtx);
 3280 
 3281                 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
 3282                         /*
 3283                          * The client wants to freeze the queue
 3284                          * after this CCB is sent.
 3285                          */
 3286                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3287                 }
 3288 
 3289                 /* In Target mode, the peripheral driver knows best... */
 3290                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3291                         if ((device->inq_flags & SID_CmdQue) != 0
 3292                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3293                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3294                         else
 3295                                 /*
 3296                                  * Clear this in case of a retried CCB that
 3297                                  * failed due to a rejected tag.
 3298                                  */
 3299                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3300                 }
 3301 
 3302                 switch (work_ccb->ccb_h.func_code) {
 3303                 case XPT_SCSI_IO:
 3304                         CAM_DEBUG(work_ccb->ccb_h.path,
 3305                             CAM_DEBUG_CDB,("%s. CDB: %s\n",
 3306                              scsi_op_desc(work_ccb->csio.cdb_io.cdb_bytes[0],
 3307                                           &device->inq_data),
 3308                              scsi_cdb_string(work_ccb->csio.cdb_io.cdb_bytes,
 3309                                              cdb_str, sizeof(cdb_str))));
 3310                         break;
 3311                 case XPT_ATA_IO:
 3312                         CAM_DEBUG(work_ccb->ccb_h.path,
 3313                             CAM_DEBUG_CDB,("%s. ACB: %s\n",
 3314                              ata_op_string(&work_ccb->ataio.cmd),
 3315                              ata_cmd_string(&work_ccb->ataio.cmd,
 3316                                             cdb_str, sizeof(cdb_str))));
 3317                         break;
 3318                 default:
 3319                         break;
 3320                 }
 3321 
 3322                 /*
 3323                  * Device queues can be shared among multiple SIM instances
 3324                  * that reside on different busses.  Use the SIM from the
 3325                  * queued device, rather than the one from the calling bus.
 3326                  */
 3327                 sim = device->sim;
 3328                 mtx = sim->mtx;
 3329                 if (mtx && !mtx_owned(mtx))
 3330                         mtx_lock(mtx);
 3331                 else
 3332                         mtx = NULL;
 3333                 (*(sim->sim_action))(sim, work_ccb);
 3334                 if (mtx)
 3335                         mtx_unlock(mtx);
 3336                 mtx_lock(&devq->send_mtx);
 3337         }
 3338         devq->send_queue.qfrozen_cnt--;
 3339 }
 3340 
 3341 /*
 3342  * This function merges the request fields from the slave ccb into the
 3343  * master ccb, while keeping important header fields constant.
 3344  */
 3345 void
 3346 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3347 {
 3348 
 3349         /*
 3350          * Pull fields that are valid for peripheral drivers to set
 3351          * into the master CCB along with the CCB "payload".
 3352          */
 3353         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3354         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3355         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3356         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 3357         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3358               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3359 }
 3360 
 3361 void
 3362 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
 3363                     u_int32_t priority, u_int32_t flags)
 3364 {
 3365 
 3366         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3367         ccb_h->pinfo.priority = priority;
 3368         ccb_h->path = path;
 3369         ccb_h->path_id = path->bus->path_id;
 3370         if (path->target)
 3371                 ccb_h->target_id = path->target->target_id;
 3372         else
 3373                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3374         if (path->device) {
 3375                 ccb_h->target_lun = path->device->lun_id;
 3376                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3377         } else {
 3378                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3379         }
 3380         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3381         ccb_h->flags = flags;
 3382         ccb_h->xflags = 0;
 3383 }
 3384 
 3385 void
 3386 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3387 {
 3388         xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
 3389 }
 3390 
 3391 /* Path manipulation functions */
 3392 cam_status
 3393 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3394                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3395 {
 3396         struct     cam_path *path;
 3397         cam_status status;
 3398 
 3399         path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
 3400 
 3401         if (path == NULL) {
 3402                 status = CAM_RESRC_UNAVAIL;
 3403                 return(status);
 3404         }
 3405         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3406         if (status != CAM_REQ_CMP) {
 3407                 free(path, M_CAMPATH);
 3408                 path = NULL;
 3409         }
 3410         *new_path_ptr = path;
 3411         return (status);
 3412 }
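
/*
 * Editorial sketch, not part of the original source: a typical
 * create/use/free cycle for a wildcard path, as used for bus-wide
 * async notifications.  The helper name example_wildcard_path is
 * hypothetical.
 */
static cam_status
example_wildcard_path(path_id_t path_id)
{
        struct cam_path *path;
        cam_status status;

        status = xpt_create_path(&path, /*periph*/NULL, path_id,
            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
        if (status != CAM_REQ_CMP)
                return (status);
        /* ... use the path, e.g. with xpt_print() or xpt_async() ... */
        xpt_free_path(path);
        return (CAM_REQ_CMP);
}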
 3413 
 3414 cam_status
 3415 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
 3416                          struct cam_periph *periph, path_id_t path_id,
 3417                          target_id_t target_id, lun_id_t lun_id)
 3418 {
 3419 
 3420         return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
 3421             lun_id));
 3422 }
 3423 
 3424 cam_status
 3425 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 3426                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3427 {
 3428         struct       cam_eb *bus;
 3429         struct       cam_et *target;
 3430         struct       cam_ed *device;
 3431         cam_status   status;
 3432 
 3433         status = CAM_REQ_CMP;   /* Completed without error */
 3434         target = NULL;          /* Wildcarded */
 3435         device = NULL;          /* Wildcarded */
 3436 
 3437         /*
 3438          * We will potentially modify the EDT, so block interrupts
 3439          * that may attempt to create cam paths.
 3440          */
 3441         bus = xpt_find_bus(path_id);
 3442         if (bus == NULL) {
 3443                 status = CAM_PATH_INVALID;
 3444         } else {
 3445                 xpt_lock_buses();
 3446                 mtx_lock(&bus->eb_mtx);
 3447                 target = xpt_find_target(bus, target_id);
 3448                 if (target == NULL) {
 3449                         /* Create one */
 3450                         struct cam_et *new_target;
 3451 
 3452                         new_target = xpt_alloc_target(bus, target_id);
 3453                         if (new_target == NULL) {
 3454                                 status = CAM_RESRC_UNAVAIL;
 3455                         } else {
 3456                                 target = new_target;
 3457                         }
 3458                 }
 3459                 xpt_unlock_buses();
 3460                 if (target != NULL) {
 3461                         device = xpt_find_device(target, lun_id);
 3462                         if (device == NULL) {
 3463                                 /* Create one */
 3464                                 struct cam_ed *new_device;
 3465 
 3466                                 new_device =
 3467                                     (*(bus->xport->alloc_device))(bus,
 3468                                                                       target,
 3469                                                                       lun_id);
 3470                                 if (new_device == NULL) {
 3471                                         status = CAM_RESRC_UNAVAIL;
 3472                                 } else {
 3473                                         device = new_device;
 3474                                 }
 3475                         }
 3476                 }
 3477                 mtx_unlock(&bus->eb_mtx);
 3478         }
 3479 
 3480         /*
 3481          * Only touch the user's data if we are successful.
 3482          */
 3483         if (status == CAM_REQ_CMP) {
 3484                 new_path->periph = perph;
 3485                 new_path->bus = bus;
 3486                 new_path->target = target;
 3487                 new_path->device = device;
 3488                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 3489         } else {
 3490                 if (device != NULL)
 3491                         xpt_release_device(device);
 3492                 if (target != NULL)
 3493                         xpt_release_target(target);
 3494                 if (bus != NULL)
 3495                         xpt_release_bus(bus);
 3496         }
 3497         return (status);
 3498 }
 3499 
 3500 cam_status
 3501 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
 3502 {
 3503         struct     cam_path *new_path;
 3504 
 3505         new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
 3506         if (new_path == NULL)
 3507                 return(CAM_RESRC_UNAVAIL);
 3508         xpt_copy_path(new_path, path);
 3509         *new_path_ptr = new_path;
 3510         return (CAM_REQ_CMP);
 3511 }
 3512 
 3513 void
 3514 xpt_copy_path(struct cam_path *new_path, struct cam_path *path)
 3515 {
 3516 
 3517         *new_path = *path;
 3518         if (path->bus != NULL)
 3519                 xpt_acquire_bus(path->bus);
 3520         if (path->target != NULL)
 3521                 xpt_acquire_target(path->target);
 3522         if (path->device != NULL)
 3523                 xpt_acquire_device(path->device);
 3524 }
 3525 
 3526 void
 3527 xpt_release_path(struct cam_path *path)
 3528 {
 3529         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 3530         if (path->device != NULL) {
 3531                 xpt_release_device(path->device);
 3532                 path->device = NULL;
 3533         }
 3534         if (path->target != NULL) {
 3535                 xpt_release_target(path->target);
 3536                 path->target = NULL;
 3537         }
 3538         if (path->bus != NULL) {
 3539                 xpt_release_bus(path->bus);
 3540                 path->bus = NULL;
 3541         }
 3542 }
 3543 
 3544 void
 3545 xpt_free_path(struct cam_path *path)
 3546 {
 3547 
 3548         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 3549         xpt_release_path(path);
 3550         free(path, M_CAMPATH);
 3551 }
 3552 
 3553 void
 3554 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
 3555     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
 3556 {
 3557 
 3558         xpt_lock_buses();
 3559         if (bus_ref) {
 3560                 if (path->bus)
 3561                         *bus_ref = path->bus->refcount;
 3562                 else
 3563                         *bus_ref = 0;
 3564         }
 3565         if (periph_ref) {
 3566                 if (path->periph)
 3567                         *periph_ref = path->periph->refcount;
 3568                 else
 3569                         *periph_ref = 0;
 3570         }
 3571         xpt_unlock_buses();
 3572         if (target_ref) {
 3573                 if (path->target)
 3574                         *target_ref = path->target->refcount;
 3575                 else
 3576                         *target_ref = 0;
 3577         }
 3578         if (device_ref) {
 3579                 if (path->device)
 3580                         *device_ref = path->device->refcount;
 3581                 else
 3582                         *device_ref = 0;
 3583         }
 3584 }
 3585 
 3586 /*
 3587  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 3588  * in path1, 2 for match with wildcards in path2.
 3589  */
 3590 int
 3591 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 3592 {
 3593         int retval = 0;
 3594 
 3595         if (path1->bus != path2->bus) {
 3596                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 3597                         retval = 1;
 3598                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 3599                         retval = 2;
 3600                 else
 3601                         return (-1);
 3602         }
 3603         if (path1->target != path2->target) {
 3604                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 3605                         if (retval == 0)
 3606                                 retval = 1;
 3607                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 3608                         retval = 2;
 3609                 else
 3610                         return (-1);
 3611         }
 3612         if (path1->device != path2->device) {
 3613                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 3614                         if (retval == 0)
 3615                                 retval = 1;
 3616                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 3617                         retval = 2;
 3618                 else
 3619                         return (-1);
 3620         }
 3621         return (retval);
 3622 }
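
/*
 * Editorial sketch, not part of the original source: any non-negative
 * result from xpt_path_comp() means the two paths refer to overlapping
 * sets of devices, possibly via wildcards.  The helper name
 * example_paths_overlap is hypothetical.
 */
static int
example_paths_overlap(struct cam_path *path1, struct cam_path *path2)
{

        return (xpt_path_comp(path1, path2) >= 0);
}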
 3623 
 3624 int
 3625 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
 3626 {
 3627         int retval = 0;
 3628 
 3629         if (path->bus != dev->target->bus) {
 3630                 if (path->bus->path_id == CAM_BUS_WILDCARD)
 3631                         retval = 1;
 3632                 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
 3633                         retval = 2;
 3634                 else
 3635                         return (-1);
 3636         }
 3637         if (path->target != dev->target) {
 3638                 if (path->target->target_id == CAM_TARGET_WILDCARD) {
 3639                         if (retval == 0)
 3640                                 retval = 1;
 3641                 } else if (dev->target->target_id == CAM_TARGET_WILDCARD)
 3642                         retval = 2;
 3643                 else
 3644                         return (-1);
 3645         }
 3646         if (path->device != dev) {
 3647                 if (path->device->lun_id == CAM_LUN_WILDCARD) {
 3648                         if (retval == 0)
 3649                                 retval = 1;
 3650                 } else if (dev->lun_id == CAM_LUN_WILDCARD)
 3651                         retval = 2;
 3652                 else
 3653                         return (-1);
 3654         }
 3655         return (retval);
 3656 }
 3657 
 3658 void
 3659 xpt_print_path(struct cam_path *path)
 3660 {
 3661 
 3662         if (path == NULL)
 3663                 printf("(nopath): ");
 3664         else {
 3665                 if (path->periph != NULL)
 3666                         printf("(%s%d:", path->periph->periph_name,
 3667                                path->periph->unit_number);
 3668                 else
 3669                         printf("(noperiph:");
 3670 
 3671                 if (path->bus != NULL)
 3672                         printf("%s%d:%d:", path->bus->sim->sim_name,
 3673                                path->bus->sim->unit_number,
 3674                                path->bus->sim->bus_id);
 3675                 else
 3676                         printf("nobus:");
 3677 
 3678                 if (path->target != NULL)
 3679                         printf("%d:", path->target->target_id);
 3680                 else
 3681                         printf("X:");
 3682 
 3683                 if (path->device != NULL)
 3684                         printf("%jx): ", (uintmax_t)path->device->lun_id);
 3685                 else
 3686                         printf("X): ");
 3687         }
 3688 }
 3689 
 3690 void
 3691 xpt_print_device(struct cam_ed *device)
 3692 {
 3693 
 3694         if (device == NULL)
 3695                 printf("(nopath): ");
 3696         else {
 3697                 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
 3698                        device->sim->unit_number,
 3699                        device->sim->bus_id,
 3700                        device->target->target_id,
 3701                        (uintmax_t)device->lun_id);
 3702         }
 3703 }
 3704 
 3705 void
 3706 xpt_print(struct cam_path *path, const char *fmt, ...)
 3707 {
 3708         va_list ap;
 3709         xpt_print_path(path);
 3710         va_start(ap, fmt);
 3711         vprintf(fmt, ap);
 3712         va_end(ap);
 3713 }
 3714 
 3715 int
 3716 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 3717 {
 3718         struct sbuf sb;
 3719 
 3720         sbuf_new(&sb, str, str_len, 0);
 3721 
 3722         if (path == NULL)
 3723                 sbuf_printf(&sb, "(nopath): ");
 3724         else {
 3725                 if (path->periph != NULL)
 3726                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 3727                                     path->periph->unit_number);
 3728                 else
 3729                         sbuf_printf(&sb, "(noperiph:");
 3730 
 3731                 if (path->bus != NULL)
 3732                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 3733                                     path->bus->sim->unit_number,
 3734                                     path->bus->sim->bus_id);
 3735                 else
 3736                         sbuf_printf(&sb, "nobus:");
 3737 
 3738                 if (path->target != NULL)
 3739                         sbuf_printf(&sb, "%d:", path->target->target_id);
 3740                 else
 3741                         sbuf_printf(&sb, "X:");
 3742 
 3743                 if (path->device != NULL)
 3744                         sbuf_printf(&sb, "%jx): ",
 3745                             (uintmax_t)path->device->lun_id);
 3746                 else
 3747                         sbuf_printf(&sb, "X): ");
 3748         }
 3749         sbuf_finish(&sb);
 3750 
 3751         return(sbuf_len(&sb));
 3752 }
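
/*
 * Editorial sketch, not part of the original source: rendering a path
 * into a caller-supplied buffer, for messages that cannot use
 * xpt_print() directly.  The helper name example_log_path is
 * hypothetical.
 */
static void
example_log_path(struct cam_path *path)
{
        char buf[64];

        xpt_path_string(path, buf, sizeof(buf));
        printf("%soperation complete\n", buf);
}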
 3753 
 3754 path_id_t
 3755 xpt_path_path_id(struct cam_path *path)
 3756 {
 3757         return(path->bus->path_id);
 3758 }
 3759 
 3760 target_id_t
 3761 xpt_path_target_id(struct cam_path *path)
 3762 {
 3763         if (path->target != NULL)
 3764                 return (path->target->target_id);
 3765         else
 3766                 return (CAM_TARGET_WILDCARD);
 3767 }
 3768 
 3769 lun_id_t
 3770 xpt_path_lun_id(struct cam_path *path)
 3771 {
 3772         if (path->device != NULL)
 3773                 return (path->device->lun_id);
 3774         else
 3775                 return (CAM_LUN_WILDCARD);
 3776 }
 3777 
 3778 struct cam_sim *
 3779 xpt_path_sim(struct cam_path *path)
 3780 {
 3781 
 3782         return (path->bus->sim);
 3783 }
 3784 
 3785 struct cam_periph*
 3786 xpt_path_periph(struct cam_path *path)
 3787 {
 3788 
 3789         return (path->periph);
 3790 }
 3791 
 3792 int
 3793 xpt_path_legacy_ata_id(struct cam_path *path)
 3794 {
 3795         struct cam_eb *bus;
 3796         int bus_id;
 3797 
 3798         if (strcmp(path->bus->sim->sim_name, "ata") != 0 &&
 3799             strcmp(path->bus->sim->sim_name, "ahcich") != 0 &&
 3800             strcmp(path->bus->sim->sim_name, "mvsch") != 0 &&
 3801             strcmp(path->bus->sim->sim_name, "siisch") != 0)
 3802                 return (-1);
 3803 
 3804         if (strcmp(path->bus->sim->sim_name, "ata") == 0 &&
 3805             path->bus->sim->unit_number < 2) {
 3806                 bus_id = path->bus->sim->unit_number;
 3807         } else {
 3808                 bus_id = 2;
 3809                 xpt_lock_buses();
 3810                 TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
 3811                         if (bus == path->bus)
 3812                                 break;
 3813                         if ((strcmp(bus->sim->sim_name, "ata") == 0 &&
 3814                              bus->sim->unit_number >= 2) ||
 3815                             strcmp(bus->sim->sim_name, "ahcich") == 0 ||
 3816                             strcmp(bus->sim->sim_name, "mvsch") == 0 ||
 3817                             strcmp(bus->sim->sim_name, "siisch") == 0)
 3818                                 bus_id++;
 3819                 }
 3820                 xpt_unlock_buses();
 3821         }
 3822         if (path->target != NULL) {
 3823                 if (path->target->target_id < 2)
 3824                         return (bus_id * 2 + path->target->target_id);
 3825                 else
 3826                         return (-1);
 3827         } else
 3828                 return (bus_id * 2);
 3829 }
 3830 
 3831 /*
 3832  * Release a CAM control block for the caller.  Remit the cost of the structure
 3833  * to the device referenced by the path.  If this device had no 'credits'
 3834  * and peripheral drivers have registered async callbacks for this
 3835  * notification, call them now.
 3836  */
 3837 void
 3838 xpt_release_ccb(union ccb *free_ccb)
 3839 {
 3840         struct   cam_ed *device;
 3841         struct   cam_periph *periph;
 3842 
 3843         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 3844         xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
 3845         device = free_ccb->ccb_h.path->device;
 3846         periph = free_ccb->ccb_h.path->periph;
 3847 
 3848         xpt_free_ccb(free_ccb);
 3849         periph->periph_allocated--;
 3850         cam_ccbq_release_opening(&device->ccbq);
 3851         xpt_run_allocq(periph, 0);
 3852 }
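
/*
 * Editorial sketch, not part of the original source: the allocation
 * side that pairs with xpt_release_ccb() above.  cam_periph_getccb()
 * sleeps until xpt_run_allocq() hands the peripheral a CCB at the
 * requested priority.  The helper name example_ccb_cycle is
 * hypothetical.
 */
static void
example_ccb_cycle(struct cam_periph *periph)
{
        union ccb *ccb;

        cam_periph_assert(periph, MA_OWNED);
        ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
        /* ... fill in the CCB and dispatch it, e.g. via cam_periph_runccb() ... */
        xpt_release_ccb(ccb);
}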
 3853 
 3854 /* Functions accessed by SIM drivers */
 3855 
 3856 static struct xpt_xport xport_default = {
 3857         .alloc_device = xpt_alloc_device_default,
 3858         .action = xpt_action_default,
 3859         .async = xpt_dev_async_default,
 3860 };
 3861 
 3862 /*
 3863  * A sim structure, listing the SIM entry points and instance
 3864  * identification info, is passed to xpt_bus_register to hook the SIM
 3865  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 3866  * for this new bus, places it in the list of busses, and assigns
 3867  * it a path_id.  The path_id may be influenced by "hard wiring"
 3868  * information specified by the user.  Once interrupt services are
 3869  * available, the bus will be probed.
 3870  */
 3871 int32_t
 3872 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
 3873 {
 3874         struct cam_eb *new_bus;
 3875         struct cam_eb *old_bus;
 3876         struct ccb_pathinq cpi;
 3877         struct cam_path *path;
 3878         cam_status status;
 3879 
 3880         sim->bus_id = bus;
 3881         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 3882                                           M_CAMXPT, M_NOWAIT|M_ZERO);
 3883         if (new_bus == NULL) {
 3884                 /* Couldn't satisfy request */
 3885                 return (CAM_RESRC_UNAVAIL);
 3886         }
 3887 
 3888         mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
 3889         TAILQ_INIT(&new_bus->et_entries);
 3890         cam_sim_hold(sim);
 3891         new_bus->sim = sim;
 3892         timevalclear(&new_bus->last_reset);
 3893         new_bus->flags = 0;
 3894         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 3895         new_bus->generation = 0;
 3896 
 3897         xpt_lock_buses();
 3898         sim->path_id = new_bus->path_id =
 3899             xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 3900         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3901         while (old_bus != NULL
 3902             && old_bus->path_id < new_bus->path_id)
 3903                 old_bus = TAILQ_NEXT(old_bus, links);
 3904         if (old_bus != NULL)
 3905                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 3906         else
 3907                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
 3908         xsoftc.bus_generation++;
 3909         xpt_unlock_buses();
 3910 
 3911         /*
 3912          * Set a default transport so that a PATH_INQ can be issued to
 3913          * the SIM.  This will then allow for probing and attaching of
 3914          * a more appropriate transport.
 3915          */
 3916         new_bus->xport = &xport_default;
 3917 
 3918         status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
 3919                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3920         if (status != CAM_REQ_CMP) {
 3921                 xpt_release_bus(new_bus);
 3922                 free(path, M_CAMXPT);
 3923                 return (CAM_RESRC_UNAVAIL);
 3924         }
 3925 
 3926         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 3927         cpi.ccb_h.func_code = XPT_PATH_INQ;
 3928         xpt_action((union ccb *)&cpi);
 3929 
 3930         if (cpi.ccb_h.status == CAM_REQ_CMP) {
 3931                 switch (cpi.transport) {
 3932                 case XPORT_SPI:
 3933                 case XPORT_SAS:
 3934                 case XPORT_FC:
 3935                 case XPORT_USB:
 3936                 case XPORT_ISCSI:
 3937                 case XPORT_SRP:
 3938                 case XPORT_PPB:
 3939                         new_bus->xport = scsi_get_xport();
 3940                         break;
 3941                 case XPORT_ATA:
 3942                 case XPORT_SATA:
 3943                         new_bus->xport = ata_get_xport();
 3944                         break;
 3945                 default:
 3946                         new_bus->xport = &xport_default;
 3947                         break;
 3948                 }
 3949         }
 3950 
 3951         /* Notify interested parties */
 3952         if (sim->path_id != CAM_XPT_PATH_ID) {
 3953 
 3954                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
 3955                 if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
 3956                         union   ccb *scan_ccb;
 3957 
 3958                         /* Initiate bus rescan. */
 3959                         scan_ccb = xpt_alloc_ccb_nowait();
 3960                         if (scan_ccb != NULL) {
 3961                                 scan_ccb->ccb_h.path = path;
 3962                                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
 3963                                 scan_ccb->crcn.flags = 0;
 3964                                 xpt_rescan(scan_ccb);
 3965                         } else {
 3966                                 xpt_print(path,
 3967                                           "Can't allocate CCB to scan bus\n");
 3968                                 xpt_free_path(path);
 3969                         }
 3970                 } else
 3971                         xpt_free_path(path);
 3972         } else
 3973                 xpt_free_path(path);
 3974         return (CAM_SUCCESS);
 3975 }
 3976 
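/*
 * Editorial example (not in the original file): a minimal sketch of how
 * a SIM driver's attach routine might hook into CAM via
 * xpt_bus_register().  "xxx", `sc`, `sc->mtx`, `xxx_action`, `xxx_poll`,
 * and XXX_MAX_TRANS are hypothetical driver-side names; the CAM calls
 * are the public SIM API.
 *
 *	struct cam_devq *devq;
 *	struct cam_sim *sim;
 *
 *	if ((devq = cam_simq_alloc(XXX_MAX_TRANS)) == NULL)
 *		return (ENOMEM);
 *	sim = cam_sim_alloc(xxx_action, xxx_poll, "xxx", sc,
 *	    device_get_unit(dev), &sc->mtx, 1, XXX_MAX_TRANS, devq);
 *	if (sim == NULL) {
 *		cam_simq_free(devq);
 *		return (ENOMEM);
 *	}
 *	mtx_lock(&sc->mtx);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) {
 *		cam_sim_free(sim, TRUE);
 *		mtx_unlock(&sc->mtx);
 *		return (ENXIO);
 *	}
 *	mtx_unlock(&sc->mtx);
 */
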
 3977 int32_t
 3978 xpt_bus_deregister(path_id_t pathid)
 3979 {
 3980         struct cam_path bus_path;
 3981         cam_status status;
 3982 
 3983         status = xpt_compile_path(&bus_path, NULL, pathid,
 3984                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3985         if (status != CAM_REQ_CMP)
 3986                 return (status);
 3987 
 3988         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 3989         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 3990 
 3991         /* Release the reference count held while registered. */
 3992         xpt_release_bus(bus_path.bus);
 3993         xpt_release_path(&bus_path);
 3994 
 3995         return (CAM_REQ_CMP);
 3996 }
 3997 
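/*
 * Editorial example (not in the original file): the matching detach
 * path for the hypothetical driver sketched above.  Passing TRUE to
 * cam_sim_free() also releases the devq allocated at attach time.
 *
 *	mtx_lock(&sc->mtx);
 *	xpt_bus_deregister(cam_sim_path(sc->sim));
 *	cam_sim_free(sc->sim, TRUE);
 *	mtx_unlock(&sc->mtx);
 */
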
 3998 static path_id_t
 3999 xptnextfreepathid(void)
 4000 {
 4001         struct cam_eb *bus;
 4002         path_id_t pathid;
 4003         const char *strval;
 4004 
 4005         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
 4006         pathid = 0;
 4007         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4008 retry:
 4009         /* Find an unoccupied pathid */
 4010         while (bus != NULL && bus->path_id <= pathid) {
 4011                 if (bus->path_id == pathid)
 4012                         pathid++;
 4013                 bus = TAILQ_NEXT(bus, links);
 4014         }
 4015 
 4016         /*
 4017          * Ensure that this pathid is not reserved for
 4018          * a bus that may be registered in the future.
 4019          */
 4020         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 4021                 ++pathid;
 4022                 /* Start the search over */
 4023                 goto retry;
 4024         }
 4025         return (pathid);
 4026 }
 4027 
 4028 static path_id_t
 4029 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 4030 {
 4031         path_id_t pathid;
 4032         int i, dunit, val;
 4033         char buf[32];
 4034         const char *dname;
 4035 
 4036         pathid = CAM_XPT_PATH_ID;
 4037         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 4038         if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
 4039                 return (pathid);
 4040         i = 0;
 4041         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 4042                 if (strcmp(dname, "scbus")) {
 4043                         /* Avoid a bit of foot shooting. */
 4044                         continue;
 4045                 }
 4046                 if (dunit < 0)          /* unwired?! */
 4047                         continue;
 4048                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 4049                         if (sim_bus == val) {
 4050                                 pathid = dunit;
 4051                                 break;
 4052                         }
 4053                 } else if (sim_bus == 0) {
 4054                         /* Unspecified matches bus 0 */
 4055                         pathid = dunit;
 4056                         break;
 4057                 } else {
 4058                         printf("Ambiguous scbus configuration for %s%d "
 4059                                "bus %d, cannot wire down.  The kernel "
 4060                                "config entry for scbus%d should "
 4061                                "specify a controller bus.\n"
 4062                                "Scbus will be assigned dynamically.\n",
 4063                                sim_name, sim_unit, sim_bus, dunit);
 4064                         break;
 4065                 }
 4066         }
 4067 
 4068         if (pathid == CAM_XPT_PATH_ID)
 4069                 pathid = xptnextfreepathid();
 4070         return (pathid);
 4071 }
 4072 
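/*
 * Editorial example (not in the original file): the "hard wiring"
 * consulted above comes from kernel environment hints.  To pin the bus
 * behind, say, ahcich0 to scbus3, one could add to /boot/device.hints
 * (unit numbers here are illustrative):
 *
 *	hint.scbus.3.at="ahcich0"
 *
 * For SIMs exposing several buses, hint.scbus.3.bus="1" selects which
 * controller bus is wired, matching the resource_int_value() lookup
 * above.
 */
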
 4073 static const char *
 4074 xpt_async_string(u_int32_t async_code)
 4075 {
 4076 
 4077         switch (async_code) {
 4078         case AC_BUS_RESET: return ("AC_BUS_RESET");
 4079         case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
 4080         case AC_SCSI_AEN: return ("AC_SCSI_AEN");
 4081         case AC_SENT_BDR: return ("AC_SENT_BDR");
 4082         case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
 4083         case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
 4084         case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
 4085         case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
 4086         case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
 4087         case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
 4088         case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
 4089         case AC_CONTRACT: return ("AC_CONTRACT");
 4090         case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
 4091         case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
 4092         }
 4093         return ("AC_UNKNOWN");
 4094 }
 4095 
 4096 static int
 4097 xpt_async_size(u_int32_t async_code)
 4098 {
 4099 
 4100         switch (async_code) {
 4101         case AC_BUS_RESET: return (0);
 4102         case AC_UNSOL_RESEL: return (0);
 4103         case AC_SCSI_AEN: return (0);
 4104         case AC_SENT_BDR: return (0);
 4105         case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
 4106         case AC_PATH_DEREGISTERED: return (0);
 4107         case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
 4108         case AC_LOST_DEVICE: return (0);
 4109         case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
 4110         case AC_INQ_CHANGED: return (0);
 4111         case AC_GETDEV_CHANGED: return (0);
 4112         case AC_CONTRACT: return (sizeof(struct ac_contract));
 4113         case AC_ADVINFO_CHANGED: return (-1);
 4114         case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
 4115         }
 4116         return (0);
 4117 }
 4118 
 4119 static int
 4120 xpt_async_process_dev(struct cam_ed *device, void *arg)
 4121 {
 4122         union ccb *ccb = arg;
 4123         struct cam_path *path = ccb->ccb_h.path;
 4124         void *async_arg = ccb->casync.async_arg_ptr;
 4125         u_int32_t async_code = ccb->casync.async_code;
 4126         int relock;
 4127 
 4128         if (path->device != device
 4129          && path->device->lun_id != CAM_LUN_WILDCARD
 4130          && device->lun_id != CAM_LUN_WILDCARD)
 4131                 return (1);
 4132 
 4133         /*
 4134          * The async callback could free the device.
 4135          * If it is a broadcast async, it doesn't hold
 4136          * device reference, so take our own reference.
 4137          */
 4138         xpt_acquire_device(device);
 4139 
 4140         /*
 4141          * If async for specific device is to be delivered to
 4142          * the wildcard client, take the specific device lock.
 4143          * XXX: We may need a way for client to specify it.
 4144          */
 4145         if ((device->lun_id == CAM_LUN_WILDCARD &&
 4146              path->device->lun_id != CAM_LUN_WILDCARD) ||
 4147             (device->target->target_id == CAM_TARGET_WILDCARD &&
 4148              path->target->target_id != CAM_TARGET_WILDCARD) ||
 4149             (device->target->bus->path_id == CAM_BUS_WILDCARD &&
 4150              path->target->bus->path_id != CAM_BUS_WILDCARD)) {
 4151                 mtx_unlock(&device->device_mtx);
 4152                 xpt_path_lock(path);
 4153                 relock = 1;
 4154         } else
 4155                 relock = 0;
 4156 
 4157         (*(device->target->bus->xport->async))(async_code,
 4158             device->target->bus, device->target, device, async_arg);
 4159         xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
 4160 
 4161         if (relock) {
 4162                 xpt_path_unlock(path);
 4163                 mtx_lock(&device->device_mtx);
 4164         }
 4165         xpt_release_device(device);
 4166         return (1);
 4167 }
 4168 
 4169 static int
 4170 xpt_async_process_tgt(struct cam_et *target, void *arg)
 4171 {
 4172         union ccb *ccb = arg;
 4173         struct cam_path *path = ccb->ccb_h.path;
 4174 
 4175         if (path->target != target
 4176          && path->target->target_id != CAM_TARGET_WILDCARD
 4177          && target->target_id != CAM_TARGET_WILDCARD)
 4178                 return (1);
 4179 
 4180         if (ccb->casync.async_code == AC_SENT_BDR) {
 4181                 /* Update our notion of when the last reset occurred */
 4182                 microtime(&target->last_reset);
 4183         }
 4184 
 4185         return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
 4186 }
 4187 
 4188 static void
 4189 xpt_async_process(struct cam_periph *periph, union ccb *ccb)
 4190 {
 4191         struct cam_eb *bus;
 4192         struct cam_path *path;
 4193         void *async_arg;
 4194         u_int32_t async_code;
 4195 
 4196         path = ccb->ccb_h.path;
 4197         async_code = ccb->casync.async_code;
 4198         async_arg = ccb->casync.async_arg_ptr;
 4199         CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
 4200             ("xpt_async(%s)\n", xpt_async_string(async_code)));
 4201         bus = path->bus;
 4202 
 4203         if (async_code == AC_BUS_RESET) {
 4204                 /* Update our notion of when the last reset occurred */
 4205                 microtime(&bus->last_reset);
 4206         }
 4207 
 4208         xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
 4209 
 4210         /*
 4211          * If this wasn't a fully wildcarded async, tell all
 4212          * clients that want all async events.
 4213          */
 4214         if (bus != xpt_periph->path->bus) {
 4215                 xpt_path_lock(xpt_periph->path);
 4216                 xpt_async_process_dev(xpt_periph->path->device, ccb);
 4217                 xpt_path_unlock(xpt_periph->path);
 4218         }
 4219 
 4220         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
 4221                 xpt_release_devq(path, 1, TRUE);
 4222         else
 4223                 xpt_release_simq(path->bus->sim, TRUE);
 4224         if (ccb->casync.async_arg_size > 0)
 4225                 free(async_arg, M_CAMXPT);
 4226         xpt_free_path(path);
 4227         xpt_free_ccb(ccb);
 4228 }
 4229 
 4230 static void
 4231 xpt_async_bcast(struct async_list *async_head,
 4232                 u_int32_t async_code,
 4233                 struct cam_path *path, void *async_arg)
 4234 {
 4235         struct async_node *cur_entry;
 4236         struct mtx *mtx;
 4237 
 4238         cur_entry = SLIST_FIRST(async_head);
 4239         while (cur_entry != NULL) {
 4240                 struct async_node *next_entry;
 4241                 /*
 4242                  * Grab the next list entry before we call the current
 4243                  * entry's callback.  This is because the callback function
 4244                  * can delete its async callback entry.
 4245                  */
 4246                 next_entry = SLIST_NEXT(cur_entry, links);
 4247                 if ((cur_entry->event_enable & async_code) != 0) {
 4248                         mtx = cur_entry->event_lock ?
 4249                             path->device->sim->mtx : NULL;
 4250                         if (mtx)
 4251                                 mtx_lock(mtx);
 4252                         cur_entry->callback(cur_entry->callback_arg,
 4253                                             async_code, path,
 4254                                             async_arg);
 4255                         if (mtx)
 4256                                 mtx_unlock(mtx);
 4257                 }
 4258                 cur_entry = next_entry;
 4259         }
 4260 }
 4261 
 4262 void
 4263 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 4264 {
 4265         union ccb *ccb;
 4266         int size;
 4267 
 4268         ccb = xpt_alloc_ccb_nowait();
 4269         if (ccb == NULL) {
 4270                 xpt_print(path, "Can't allocate CCB to send %s\n",
 4271                     xpt_async_string(async_code));
 4272                 return;
 4273         }
 4274 
 4275         if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
 4276                 xpt_print(path, "Can't allocate path to send %s\n",
 4277                     xpt_async_string(async_code));
 4278                 xpt_free_ccb(ccb);
 4279                 return;
 4280         }
 4281         ccb->ccb_h.path->periph = NULL;
 4282         ccb->ccb_h.func_code = XPT_ASYNC;
 4283         ccb->ccb_h.cbfcnp = xpt_async_process;
 4284         ccb->ccb_h.flags |= CAM_UNLOCKED;
 4285         ccb->casync.async_code = async_code;
 4286         ccb->casync.async_arg_size = 0;
 4287         size = xpt_async_size(async_code);
 4288         if (size > 0 && async_arg != NULL) {
 4289                 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
 4290                 if (ccb->casync.async_arg_ptr == NULL) {
 4291                         xpt_print(path, "Can't allocate argument to send %s\n",
 4292                             xpt_async_string(async_code));
 4293                         xpt_free_path(ccb->ccb_h.path);
 4294                         xpt_free_ccb(ccb);
 4295                         return;
 4296                 }
 4297                 memcpy(ccb->casync.async_arg_ptr, async_arg, size);
 4298                 ccb->casync.async_arg_size = size;
 4299         } else if (size < 0) {
 4300                 ccb->casync.async_arg_ptr = async_arg;
 4301                 ccb->casync.async_arg_size = size;
 4302         }
 4303         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
 4304                 xpt_freeze_devq(path, 1);
 4305         else
 4306                 xpt_freeze_simq(path->bus->sim, 1);
 4307         xpt_done(ccb);
 4308 }
 4309 
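/*
 * Editorial example (not in the original file): a SIM that has just
 * observed a bus reset could notify CAM directly from its interrupt
 * path, `sc->path` being a hypothetical path to its bus:
 *
 *	xpt_async(AC_BUS_RESET, sc->path, NULL);
 *
 * The call above only allocates and queues an XPT_ASYNC CCB; the actual
 * callbacks run later from xpt_async_process(), with the device or SIM
 * queue frozen in between as seen here.
 */
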
 4310 static void
 4311 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
 4312                       struct cam_et *target, struct cam_ed *device,
 4313                       void *async_arg)
 4314 {
 4315 
 4316         /*
 4317          * We only need to handle events for real devices.
 4318          */
 4319         if (target->target_id == CAM_TARGET_WILDCARD
 4320          || device->lun_id == CAM_LUN_WILDCARD)
 4321                 return;
 4322 
 4323         printf("%s called\n", __func__);
 4324 }
 4325 
 4326 static uint32_t
 4327 xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
 4328 {
 4329         struct cam_devq *devq;
 4330         uint32_t freeze;
 4331 
 4332         devq = dev->sim->devq;
 4333         mtx_assert(&devq->send_mtx, MA_OWNED);
 4334         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
 4335             ("xpt_freeze_devq_device(%d) %u->%u\n", count,
 4336             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
 4337         freeze = (dev->ccbq.queue.qfrozen_cnt += count);
 4338         /* Remove frozen device from sendq. */
 4339         if (device_is_queued(dev))
 4340                 camq_remove(&devq->send_queue, dev->devq_entry.index);
 4341         return (freeze);
 4342 }
 4343 
 4344 u_int32_t
 4345 xpt_freeze_devq(struct cam_path *path, u_int count)
 4346 {
 4347         struct cam_ed   *dev = path->device;
 4348         struct cam_devq *devq;
 4349         uint32_t         freeze;
 4350 
 4351         devq = dev->sim->devq;
 4352         mtx_lock(&devq->send_mtx);
 4353         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
 4354         freeze = xpt_freeze_devq_device(dev, count);
 4355         mtx_unlock(&devq->send_mtx);
 4356         return (freeze);
 4357 }
 4358 
 4359 u_int32_t
 4360 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4361 {
 4362         struct cam_devq *devq;
 4363         uint32_t         freeze;
 4364 
 4365         devq = sim->devq;
 4366         mtx_lock(&devq->send_mtx);
 4367         freeze = (devq->send_queue.qfrozen_cnt += count);
 4368         mtx_unlock(&devq->send_mtx);
 4369         return (freeze);
 4370 }
 4371 
 4372 static void
 4373 xpt_release_devq_timeout(void *arg)
 4374 {
 4375         struct cam_ed *dev;
 4376         struct cam_devq *devq;
 4377 
 4378         dev = (struct cam_ed *)arg;
 4379         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
 4380         devq = dev->sim->devq;
 4381         mtx_assert(&devq->send_mtx, MA_OWNED);
 4382         if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
 4383                 xpt_run_devq(devq);
 4384 }
 4385 
 4386 void
 4387 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4388 {
 4389         struct cam_ed *dev;
 4390         struct cam_devq *devq;
 4391 
 4392         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
 4393             count, run_queue));
 4394         dev = path->device;
 4395         devq = dev->sim->devq;
 4396         mtx_lock(&devq->send_mtx);
 4397         if (xpt_release_devq_device(dev, count, run_queue))
 4398                 xpt_run_devq(dev->sim->devq);
 4399         mtx_unlock(&devq->send_mtx);
 4400 }
 4401 
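/*
 * Editorial example (not in the original file): the usual error-recovery
 * pattern is to freeze the device queue, repair state, then release it
 * so queued CCBs flow again.  Freeze counts must balance: every
 * xpt_freeze_devq() needs a matching xpt_release_devq().
 *
 *	xpt_freeze_devq(path, 1);
 *	... issue and wait for recovery commands ...
 *	xpt_release_devq(path, 1, TRUE);
 */
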
 4402 static int
 4403 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
 4404 {
 4405 
 4406         mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
 4407         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
 4408             ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
 4409             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
 4410         if (count > dev->ccbq.queue.qfrozen_cnt) {
 4411 #ifdef INVARIANTS
 4412                 printf("xpt_release_devq(): requested %u > present %u\n",
 4413                     count, dev->ccbq.queue.qfrozen_cnt);
 4414 #endif
 4415                 count = dev->ccbq.queue.qfrozen_cnt;
 4416         }
 4417         dev->ccbq.queue.qfrozen_cnt -= count;
 4418         if (dev->ccbq.queue.qfrozen_cnt == 0) {
 4419                 /*
 4420                  * No longer need to wait for a successful
 4421                  * command completion.
 4422                  */
 4423                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4424                 /*
 4425                  * Remove any timeouts that might be scheduled
 4426                  * to release this queue.
 4427                  */
 4428                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4429                         callout_stop(&dev->callout);
 4430                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4431                 }
 4432                 /*
 4433                  * Now that we are unfrozen schedule the
 4434                  * device so any pending transactions are
 4435                  * run.
 4436                  */
 4437                 xpt_schedule_devq(dev->sim->devq, dev);
 4438         } else
 4439                 run_queue = 0;
 4440         return (run_queue);
 4441 }
 4442 
 4443 void
 4444 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4445 {
 4446         struct cam_devq *devq;
 4447 
 4448         devq = sim->devq;
 4449         mtx_lock(&devq->send_mtx);
 4450         if (devq->send_queue.qfrozen_cnt <= 0) {
 4451 #ifdef INVARIANTS
 4452                 printf("xpt_release_simq: requested 1 > present %u\n",
 4453                     devq->send_queue.qfrozen_cnt);
 4454 #endif
 4455         } else
 4456                 devq->send_queue.qfrozen_cnt--;
 4457         if (devq->send_queue.qfrozen_cnt == 0) {
 4458                 /*
 4459                  * If there is a timeout scheduled to release this
 4460                  * sim queue, remove it.  The queue frozen count is
 4461                  * already at 0.
 4462                  */
 4463                 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4464                         callout_stop(&sim->callout);
 4465                         sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4466                 }
 4467                 if (run_queue) {
 4468                         /*
 4469                          * Now that we are unfrozen run the send queue.
 4470                          */
 4471                         xpt_run_devq(sim->devq);
 4472                 }
 4473         }
 4474         mtx_unlock(&devq->send_mtx);
 4475 }
 4476 
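/*
 * Editorial example (not in the original file): SIM-wide throttling
 * follows the same pattern at the controller level, e.g. around a
 * hypothetical controller reinitialization:
 *
 *	xpt_freeze_simq(sc->sim, 1);
 *	... quiesce and reinitialize the controller ...
 *	xpt_release_simq(sc->sim, TRUE);
 *
 * Alternatively, a SIM may set CAM_RELEASE_SIMQ in a completed CCB's
 * status and let xpt_done_process() below perform the release.
 */
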
 4477 /*
 4478  * XXX Appears to be unused.
 4479  */
 4480 static void
 4481 xpt_release_simq_timeout(void *arg)
 4482 {
 4483         struct cam_sim *sim;
 4484 
 4485         sim = (struct cam_sim *)arg;
 4486         xpt_release_simq(sim, /* run_queue */ TRUE);
 4487 }
 4488 
 4489 void
 4490 xpt_done(union ccb *done_ccb)
 4491 {
 4492         struct cam_doneq *queue;
 4493         int     run, hash;
 4494 
 4495         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4496         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
 4497                 return;
 4498 
 4499         hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
 4500             done_ccb->ccb_h.target_lun) % cam_num_doneqs;
 4501         queue = &cam_doneqs[hash];
 4502         mtx_lock(&queue->cam_doneq_mtx);
 4503         run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
 4504         STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
 4505         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4506         mtx_unlock(&queue->cam_doneq_mtx);
 4507         if (run)
 4508                 wakeup(&queue->cam_doneq);
 4509 }
 4510 
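/*
 * Editorial example (not in the original file): a SIM completes a
 * hardware command by filling in the status and handing the CCB back:
 *
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 *
 * Note that xpt_done() merely enqueues the CCB on a completion queue
 * hashed by path/target/LUN; the xpt_done_td() threads below run the
 * peripheral callbacks.
 */
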
 4511 void
 4512 xpt_done_direct(union ccb *done_ccb)
 4513 {
 4514 
 4515         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct\n"));
 4516         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
 4517                 return;
 4518 
 4519         xpt_done_process(&done_ccb->ccb_h);
 4520 }
 4521 
 4522 union ccb *
 4523 xpt_alloc_ccb()
 4524 {
 4525         union ccb *new_ccb;
 4526 
 4527         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
 4528         return (new_ccb);
 4529 }
 4530 
 4531 union ccb *
 4532 xpt_alloc_ccb_nowait()
 4533 {
 4534         union ccb *new_ccb;
 4535 
 4536         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
 4537         return (new_ccb);
 4538 }
 4539 
 4540 void
 4541 xpt_free_ccb(union ccb *free_ccb)
 4542 {
 4543         free(free_ccb, M_CAMCCB);
 4544 }
 4545 
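/*
 * Editorial example (not in the original file): a sketch of requesting
 * a LUN rescan with a dynamically allocated CCB; bus/target/lun values
 * are illustrative and error handling is abbreviated.
 *
 *	union ccb *ccb = xpt_alloc_ccb();
 *
 *	if (xpt_create_path(&ccb->ccb_h.path, NULL, bus, target, lun) !=
 *	    CAM_REQ_CMP) {
 *		xpt_free_ccb(ccb);
 *		return (EIO);
 *	}
 *	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
 *	ccb->ccb_h.func_code = XPT_SCAN_LUN;
 *	ccb->crcn.flags = 0;
 *	xpt_rescan(ccb);
 *
 * xpt_rescan() consumes both the CCB and the path on completion.
 */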
 4546 
 4547 
 4548 /* Private XPT functions */
 4549 
 4550 /*
 4551  * Get a CAM control block for the caller. Charge the structure to the device
 4552  * referenced by the path.  If we don't have sufficient resources to allocate
 4553  * more ccbs, we return NULL.
 4554  */
 4555 static union ccb *
 4556 xpt_get_ccb_nowait(struct cam_periph *periph)
 4557 {
 4558         union ccb *new_ccb;
 4559 
 4560         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
 4561         if (new_ccb == NULL)
 4562                 return (NULL);
 4563         periph->periph_allocated++;
 4564         cam_ccbq_take_opening(&periph->path->device->ccbq);
 4565         return (new_ccb);
 4566 }
 4567 
 4568 static union ccb *
 4569 xpt_get_ccb(struct cam_periph *periph)
 4570 {
 4571         union ccb *new_ccb;
 4572 
 4573         cam_periph_unlock(periph);
 4574         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
 4575         cam_periph_lock(periph);
 4576         periph->periph_allocated++;
 4577         cam_ccbq_take_opening(&periph->path->device->ccbq);
 4578         return (new_ccb);
 4579 }
 4580 
 4581 union ccb *
 4582 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
 4583 {
 4584         struct ccb_hdr *ccb_h;
 4585 
 4586         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
 4587         cam_periph_assert(periph, MA_OWNED);
 4588         while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
 4589             ccb_h->pinfo.priority != priority) {
 4590                 if (priority < periph->immediate_priority) {
 4591                         periph->immediate_priority = priority;
 4592                         xpt_run_allocq(periph, 0);
 4593                 } else
 4594                         cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
 4595                             "cgticb", 0);
 4596         }
 4597         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
 4598         return ((union ccb *)ccb_h);
 4599 }
 4600 
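/*
 * Editorial example (not in the original file): a peripheral driver's
 * I/O path obtains a CCB at the desired priority, dispatches it, and
 * returns it from its completion callback (periph lock held in both):
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	... fill in ccb->csio and call xpt_action(ccb) ...
 *
 * and later, from the done routine:
 *
 *	xpt_release_ccb(done_ccb);
 */
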
 4601 static void
 4602 xpt_acquire_bus(struct cam_eb *bus)
 4603 {
 4604 
 4605         xpt_lock_buses();
 4606         bus->refcount++;
 4607         xpt_unlock_buses();
 4608 }
 4609 
 4610 static void
 4611 xpt_release_bus(struct cam_eb *bus)
 4612 {
 4613 
 4614         xpt_lock_buses();
 4615         KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
 4616         if (--bus->refcount > 0) {
 4617                 xpt_unlock_buses();
 4618                 return;
 4619         }
 4620         TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
 4621         xsoftc.bus_generation++;
 4622         xpt_unlock_buses();
 4623         KASSERT(TAILQ_EMPTY(&bus->et_entries),
 4624             ("destroying bus, but target list is not empty"));
 4625         cam_sim_release(bus->sim);
 4626         mtx_destroy(&bus->eb_mtx);
 4627         free(bus, M_CAMXPT);
 4628 }
 4629 
 4630 static struct cam_et *
 4631 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4632 {
 4633         struct cam_et *cur_target, *target;
 4634 
 4635         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
 4636         mtx_assert(&bus->eb_mtx, MA_OWNED);
 4637         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
 4638                                          M_NOWAIT|M_ZERO);
 4639         if (target == NULL)
 4640                 return (NULL);
 4641 
 4642         TAILQ_INIT(&target->ed_entries);
 4643         target->bus = bus;
 4644         target->target_id = target_id;
 4645         target->refcount = 1;
 4646         target->generation = 0;
 4647         target->luns = NULL;
 4648         mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
 4649         timevalclear(&target->last_reset);
 4650         /*
 4651          * Hold a reference to our parent bus so it
 4652          * will not go away before we do.
 4653          */
 4654         bus->refcount++;
 4655 
 4656         /* Insertion sort into our bus's target list */
 4657         cur_target = TAILQ_FIRST(&bus->et_entries);
 4658         while (cur_target != NULL && cur_target->target_id < target_id)
 4659                 cur_target = TAILQ_NEXT(cur_target, links);
 4660         if (cur_target != NULL) {
 4661                 TAILQ_INSERT_BEFORE(cur_target, target, links);
 4662         } else {
 4663                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4664         }
 4665         bus->generation++;
 4666         return (target);
 4667 }
 4668 
 4669 static void
 4670 xpt_acquire_target(struct cam_et *target)
 4671 {
 4672         struct cam_eb *bus = target->bus;
 4673 
 4674         mtx_lock(&bus->eb_mtx);
 4675         target->refcount++;
 4676         mtx_unlock(&bus->eb_mtx);
 4677 }
 4678 
 4679 static void
 4680 xpt_release_target(struct cam_et *target)
 4681 {
 4682         struct cam_eb *bus = target->bus;
 4683 
 4684         mtx_lock(&bus->eb_mtx);
 4685         if (--target->refcount > 0) {
 4686                 mtx_unlock(&bus->eb_mtx);
 4687                 return;
 4688         }
 4689         TAILQ_REMOVE(&bus->et_entries, target, links);
 4690         bus->generation++;
 4691         mtx_unlock(&bus->eb_mtx);
 4692         KASSERT(TAILQ_EMPTY(&target->ed_entries),
 4693             ("destroying target, but device list is not empty"));
 4694         xpt_release_bus(bus);
 4695         mtx_destroy(&target->luns_mtx);
 4696         if (target->luns)
 4697                 free(target->luns, M_CAMXPT);
 4698         free(target, M_CAMXPT);
 4699 }
 4700 
 4701 static struct cam_ed *
 4702 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
 4703                          lun_id_t lun_id)
 4704 {
 4705         struct cam_ed *device;
 4706 
 4707         device = xpt_alloc_device(bus, target, lun_id);
 4708         if (device == NULL)
 4709                 return (NULL);
 4710 
 4711         device->mintags = 1;
 4712         device->maxtags = 1;
 4713         return (device);
 4714 }
 4715 
 4716 static void
 4717 xpt_destroy_device(void *context, int pending)
 4718 {
 4719         struct cam_ed   *device = context;
 4720 
 4721         mtx_lock(&device->device_mtx);
 4722         mtx_destroy(&device->device_mtx);
 4723         free(device, M_CAMDEV);
 4724 }
 4725 
 4726 struct cam_ed *
 4727 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4728 {
 4729         struct cam_ed   *cur_device, *device;
 4730         struct cam_devq *devq;
 4731         cam_status status;
 4732 
 4733         mtx_assert(&bus->eb_mtx, MA_OWNED);
 4734         /* Make space for us in the device queue on our bus */
 4735         devq = bus->sim->devq;
 4736         mtx_lock(&devq->send_mtx);
 4737         status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
 4738         mtx_unlock(&devq->send_mtx);
 4739         if (status != CAM_REQ_CMP)
 4740                 return (NULL);
 4741 
 4742         device = (struct cam_ed *)malloc(sizeof(*device),
 4743                                          M_CAMDEV, M_NOWAIT|M_ZERO);
 4744         if (device == NULL)
 4745                 return (NULL);
 4746 
 4747         cam_init_pinfo(&device->devq_entry);
 4748         device->target = target;
 4749         device->lun_id = lun_id;
 4750         device->sim = bus->sim;
 4751         if (cam_ccbq_init(&device->ccbq,
 4752                           bus->sim->max_dev_openings) != 0) {
 4753                 free(device, M_CAMDEV);
 4754                 return (NULL);
 4755         }
 4756         SLIST_INIT(&device->asyncs);
 4757         SLIST_INIT(&device->periphs);
 4758         device->generation = 0;
 4759         device->flags = CAM_DEV_UNCONFIGURED;
 4760         device->tag_delay_count = 0;
 4761         device->tag_saved_openings = 0;
 4762         device->refcount = 1;
 4763         mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
 4764         callout_init_mtx(&device->callout, &devq->send_mtx, 0);
 4765         TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
 4766         /*
 4767          * Hold a reference to our parent bus so it
 4768          * will not go away before we do.
 4769          */
 4770         target->refcount++;
 4771 
 4772         cur_device = TAILQ_FIRST(&target->ed_entries);
 4773         while (cur_device != NULL && cur_device->lun_id < lun_id)
 4774                 cur_device = TAILQ_NEXT(cur_device, links);
 4775         if (cur_device != NULL)
 4776                 TAILQ_INSERT_BEFORE(cur_device, device, links);
 4777         else
 4778                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 4779         target->generation++;
 4780         return (device);
 4781 }
 4782 
 4783 void
 4784 xpt_acquire_device(struct cam_ed *device)
 4785 {
 4786         struct cam_eb *bus = device->target->bus;
 4787 
 4788         mtx_lock(&bus->eb_mtx);
 4789         device->refcount++;
 4790         mtx_unlock(&bus->eb_mtx);
 4791 }
 4792 
 4793 void
 4794 xpt_release_device(struct cam_ed *device)
 4795 {
 4796         struct cam_eb *bus = device->target->bus;
 4797         struct cam_devq *devq;
 4798 
 4799         mtx_lock(&bus->eb_mtx);
 4800         if (--device->refcount > 0) {
 4801                 mtx_unlock(&bus->eb_mtx);
 4802                 return;
 4803         }
 4804 
 4805         TAILQ_REMOVE(&device->target->ed_entries, device,links);
 4806         device->target->generation++;
 4807         mtx_unlock(&bus->eb_mtx);
 4808 
 4809         /* Release our slot in the devq */
 4810         devq = bus->sim->devq;
 4811         mtx_lock(&devq->send_mtx);
 4812         cam_devq_resize(devq, devq->send_queue.array_size - 1);
 4813         mtx_unlock(&devq->send_mtx);
 4814 
 4815         KASSERT(SLIST_EMPTY(&device->periphs),
 4816             ("destroying device, but periphs list is not empty"));
 4817         KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
 4818             ("destroying device while still queued for ccbs"));
 4819 
 4820         if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 4821                 callout_stop(&device->callout);
 4822 
 4823         xpt_release_target(device->target);
 4824 
 4825         cam_ccbq_fini(&device->ccbq);
 4826         /*
 4827          * Free allocated memory.  free(9) does nothing if the
 4828          * supplied pointer is NULL, so it is safe to call without
 4829          * checking.
 4830          */
 4831         free(device->supported_vpds, M_CAMXPT);
 4832         free(device->device_id, M_CAMXPT);
 4833         free(device->ext_inq, M_CAMXPT);
 4834         free(device->physpath, M_CAMXPT);
 4835         free(device->rcap_buf, M_CAMXPT);
 4836         free(device->serial_num, M_CAMXPT);
 4837         taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
 4838 }
 4839 
 4840 u_int32_t
 4841 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 4842 {
 4843         int     result;
 4844         struct  cam_ed *dev;
 4845 
 4846         dev = path->device;
 4847         mtx_lock(&dev->sim->devq->send_mtx);
 4848         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 4849         mtx_unlock(&dev->sim->devq->send_mtx);
 4850         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4851          || (dev->inq_flags & SID_CmdQue) != 0)
 4852                 dev->tag_saved_openings = newopenings;
 4853         return (result);
 4854 }
 4855 
 4856 static struct cam_eb *
 4857 xpt_find_bus(path_id_t path_id)
 4858 {
 4859         struct cam_eb *bus;
 4860 
 4861         xpt_lock_buses();
 4862         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4863              bus != NULL;
 4864              bus = TAILQ_NEXT(bus, links)) {
 4865                 if (bus->path_id == path_id) {
 4866                         bus->refcount++;
 4867                         break;
 4868                 }
 4869         }
 4870         xpt_unlock_buses();
 4871         return (bus);
 4872 }
 4873 
 4874 static struct cam_et *
 4875 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 4876 {
 4877         struct cam_et *target;
 4878 
 4879         mtx_assert(&bus->eb_mtx, MA_OWNED);
 4880         for (target = TAILQ_FIRST(&bus->et_entries);
 4881              target != NULL;
 4882              target = TAILQ_NEXT(target, links)) {
 4883                 if (target->target_id == target_id) {
 4884                         target->refcount++;
 4885                         break;
 4886                 }
 4887         }
 4888         return (target);
 4889 }
 4890 
 4891 static struct cam_ed *
 4892 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 4893 {
 4894         struct cam_ed *device;
 4895 
 4896         mtx_assert(&target->bus->eb_mtx, MA_OWNED);
 4897         for (device = TAILQ_FIRST(&target->ed_entries);
 4898              device != NULL;
 4899              device = TAILQ_NEXT(device, links)) {
 4900                 if (device->lun_id == lun_id) {
 4901                         device->refcount++;
 4902                         break;
 4903                 }
 4904         }
 4905         return (device);
 4906 }
 4907 
 4908 void
 4909 xpt_start_tags(struct cam_path *path)
 4910 {
 4911         struct ccb_relsim crs;
 4912         struct cam_ed *device;
 4913         struct cam_sim *sim;
 4914         int    newopenings;
 4915 
 4916         device = path->device;
 4917         sim = path->bus->sim;
 4918         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4919         xpt_freeze_devq(path, /*count*/1);
 4920         device->inq_flags |= SID_CmdQue;
 4921         if (device->tag_saved_openings != 0)
 4922                 newopenings = device->tag_saved_openings;
 4923         else
 4924                 newopenings = min(device->maxtags,
 4925                                   sim->max_tagged_dev_openings);
 4926         xpt_dev_ccbq_resize(path, newopenings);
 4927         xpt_async(AC_GETDEV_CHANGED, path, NULL);
 4928         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4929         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4930         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4931         crs.openings
 4932             = crs.release_timeout
 4933             = crs.qfrozen_cnt
 4934             = 0;
 4935         xpt_action((union ccb *)&crs);
 4936 }
 4937 
 4938 void
 4939 xpt_stop_tags(struct cam_path *path)
 4940 {
 4941         struct ccb_relsim crs;
 4942         struct cam_ed *device;
 4943         struct cam_sim *sim;
 4944 
 4945         device = path->device;
 4946         sim = path->bus->sim;
 4947         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4948         device->tag_delay_count = 0;
 4949         xpt_freeze_devq(path, /*count*/1);
 4950         device->inq_flags &= ~SID_CmdQue;
 4951         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
 4952         xpt_async(AC_GETDEV_CHANGED, path, NULL);
 4953         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4954         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4955         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4956         crs.openings
 4957             = crs.release_timeout
 4958             = crs.qfrozen_cnt
 4959             = 0;
 4960         xpt_action((union ccb *)&crs);
 4961 }
 4962 
 4963 static void
 4964 xpt_boot_delay(void *arg)
 4965 {
 4966 
 4967         xpt_release_boot();
 4968 }
 4969 
 4970 static void
 4971 xpt_config(void *arg)
 4972 {
 4973         /*
 4974          * Now that interrupts are enabled, go find our devices
 4975          */
 4976         if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
 4977                 printf("xpt_config: failed to create taskqueue thread.\n");
 4978 
 4979         /* Setup debugging path */
 4980         if (cam_dflags != CAM_DEBUG_NONE) {
 4981                 if (xpt_create_path(&cam_dpath, NULL,
 4982                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 4983                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 4984                         printf("xpt_config: xpt_create_path() failed for debug"
 4985                                " target %d:%d:%d, debugging disabled\n",
 4986                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 4987                         cam_dflags = CAM_DEBUG_NONE;
 4988                 }
 4989         } else
 4990                 cam_dpath = NULL;
 4991 
 4992         periphdriver_init(1);
 4993         xpt_hold_boot();
 4994         callout_init(&xsoftc.boot_callout, 1);
 4995         callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0,
 4996             xpt_boot_delay, NULL, 0);
 4997         /* Fire up rescan thread. */
 4998         if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
 4999             "cam", "scanner")) {
 5000                 printf("xpt_config: failed to create rescan thread.\n");
 5001         }
 5002 }
 5003 
 5004 void
 5005 xpt_hold_boot(void)
 5006 {
 5007         xpt_lock_buses();
 5008         xsoftc.buses_to_config++;
 5009         xpt_unlock_buses();
 5010 }
 5011 
 5012 void
 5013 xpt_release_boot(void)
 5014 {
 5015         xpt_lock_buses();
 5016         xsoftc.buses_to_config--;
 5017         if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
 5018                 struct  xpt_task *task;
 5019 
 5020                 xsoftc.buses_config_done = 1;
 5021                 xpt_unlock_buses();
 5022                 /* Call manually because we don't have any busses */
 5023                 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
 5024                 if (task != NULL) {
 5025                         TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
 5026                         taskqueue_enqueue(taskqueue_thread, &task->task);
 5027                 }
 5028         } else
 5029                 xpt_unlock_buses();
 5030 }
 5031 
 5032 /*
 5033  * If the given device only has one peripheral attached to it, and if that
 5034  * peripheral is the passthrough driver, announce it.  This ensures that the
 5035  * user sees some sort of announcement for every peripheral in their system.
 5036  */
 5037 static int
 5038 xptpassannouncefunc(struct cam_ed *device, void *arg)
 5039 {
 5040         struct cam_periph *periph;
 5041         int i;
 5042 
 5043         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 5044              periph = SLIST_NEXT(periph, periph_links), i++);
 5045 
 5046         periph = SLIST_FIRST(&device->periphs);
 5047         if ((i == 1)
 5048          && (strncmp(periph->periph_name, "pass", 4) == 0))
 5049                 xpt_announce_periph(periph, NULL);
 5050 
 5051         return(1);
 5052 }
 5053 
 5054 static void
 5055 xpt_finishconfig_task(void *context, int pending)
 5056 {
 5057 
 5058         periphdriver_init(2);
 5059         /*
 5060          * Check for devices with no "standard" peripheral driver
 5061          * attached.  For any devices like that, announce the
 5062          * passthrough driver so the user will see something.
 5063          */
 5064         if (!bootverbose)
 5065                 xpt_for_all_devices(xptpassannouncefunc, NULL);
 5066 
 5067         /* Release our hook so that the boot can continue. */
 5068         config_intrhook_disestablish(xsoftc.xpt_config_hook);
 5069         free(xsoftc.xpt_config_hook, M_CAMXPT);
 5070         xsoftc.xpt_config_hook = NULL;
 5071 
 5072         free(context, M_CAMXPT);
 5073 }
 5074 
 5075 cam_status
 5076 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
 5077                    struct cam_path *path)
 5078 {
 5079         struct ccb_setasync csa;
 5080         cam_status status;
 5081         int xptpath = 0;
 5082 
 5083         if (path == NULL) {
 5084                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
 5085                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 5086                 if (status != CAM_REQ_CMP)
 5087                         return (status);
 5088                 xpt_path_lock(path);
 5089                 xptpath = 1;
 5090         }
 5091 
 5092         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 5093         csa.ccb_h.func_code = XPT_SASYNC_CB;
 5094         csa.event_enable = event;
 5095         csa.callback = cbfunc;
 5096         csa.callback_arg = cbarg;
 5097         xpt_action((union ccb *)&csa);
 5098         status = csa.ccb_h.status;
 5099 
 5100         if (xptpath) {
 5101                 xpt_path_unlock(path);
 5102                 xpt_free_path(path);
 5103         }
 5104 
 5105         if ((status == CAM_REQ_CMP) &&
 5106             (csa.event_enable & AC_FOUND_DEVICE)) {
 5107                 /*
 5108                  * Get this peripheral up to date with all
 5109                  * the currently existing devices.
 5110                  */
 5111                 xpt_for_all_devices(xptsetasyncfunc, &csa);
 5112         }
 5113         if ((status == CAM_REQ_CMP) &&
 5114             (csa.event_enable & AC_PATH_REGISTERED)) {
 5115                 /*
 5116                  * Get this peripheral up to date with all
 5117                  * the currently existing busses.
 5118                  */
 5119                 xpt_for_all_busses(xptsetasyncbusfunc, &csa);
 5120         }
 5121 
 5122         return (status);
 5123 }
 5124 
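/*
 * Editorial example (not in the original file): a consumer interested
 * in device arrivals can register a callback; `xxx_async` and `softc`
 * are hypothetical.
 *
 *	static void
 *	xxx_async(void *softc, u_int32_t code, struct cam_path *path,
 *	    void *arg)
 *	{
 *		if (code == AC_FOUND_DEVICE) {
 *			struct ccb_getdev *cgd = arg;
 *			... inspect cgd->protocol and cgd->inq_data ...
 *		}
 *	}
 *
 *	xpt_register_async(AC_FOUND_DEVICE, xxx_async, softc, NULL);
 *
 * Passing a NULL path registers against the wildcard XPT path, and
 * because AC_FOUND_DEVICE is requested, existing devices are replayed
 * through xptsetasyncfunc() as seen above.
 */
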
 5125 static void
 5126 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 5127 {
 5128         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 5129 
 5130         switch (work_ccb->ccb_h.func_code) {
 5131         /* Common cases first */
 5132         case XPT_PATH_INQ:              /* Path routing inquiry */
 5133         {
 5134                 struct ccb_pathinq *cpi;
 5135 
 5136                 cpi = &work_ccb->cpi;
 5137                 cpi->version_num = 1; /* XXX??? */
 5138                 cpi->hba_inquiry = 0;
 5139                 cpi->target_sprt = 0;
 5140                 cpi->hba_misc = 0;
 5141                 cpi->hba_eng_cnt = 0;
 5142                 cpi->max_target = 0;
 5143                 cpi->max_lun = 0;
 5144                 cpi->initiator_id = 0;
 5145                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 5146                 strlcpy(cpi->hba_vid, "", HBA_IDLEN);
 5147                 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 5148                 cpi->unit_number = sim->unit_number;
 5149                 cpi->bus_id = sim->bus_id;
 5150                 cpi->base_transfer_speed = 0;
 5151                 cpi->protocol = PROTO_UNSPECIFIED;
 5152                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
 5153                 cpi->transport = XPORT_UNSPECIFIED;
 5154                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
 5155                 cpi->ccb_h.status = CAM_REQ_CMP;
 5156                 xpt_done(work_ccb);
 5157                 break;
 5158         }
 5159         default:
 5160                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 5161                 xpt_done(work_ccb);
 5162                 break;
 5163         }
 5164 }
 5165 
 5166 /*
 5167  * The xpt as a "controller" has no interrupt sources, so polling
 5168  * is a no-op.
 5169  */
 5170 static void
 5171 xptpoll(struct cam_sim *sim)
 5172 {
 5173 }
 5174 
 5175 void
 5176 xpt_lock_buses(void)
 5177 {
 5178         mtx_lock(&xsoftc.xpt_topo_lock);
 5179 }
 5180 
 5181 void
 5182 xpt_unlock_buses(void)
 5183 {
 5184         mtx_unlock(&xsoftc.xpt_topo_lock);
 5185 }
 5186 
 5187 struct mtx *
 5188 xpt_path_mtx(struct cam_path *path)
 5189 {
 5190 
 5191         return (&path->device->device_mtx);
 5192 }
 5193 
 5194 static void
 5195 xpt_done_process(struct ccb_hdr *ccb_h)
 5196 {
 5197         struct cam_sim *sim;
 5198         struct cam_devq *devq;
 5199         struct mtx *mtx = NULL;
 5200 
 5201         if (ccb_h->flags & CAM_HIGH_POWER) {
 5202                 struct highpowerlist    *hphead;
 5203                 struct cam_ed           *device;
 5204 
 5205                 mtx_lock(&xsoftc.xpt_highpower_lock);
 5206                 hphead = &xsoftc.highpowerq;
 5207 
 5208                 device = STAILQ_FIRST(hphead);
 5209 
 5210                 /*
 5211                  * Increment the count since this command is done.
 5212                  */
 5213                 xsoftc.num_highpower++;
 5214 
 5215                 /*
 5216                  * Any high powered commands queued up?
 5217                  */
 5218                 if (device != NULL) {
 5219 
 5220                         STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
 5221                         mtx_unlock(&xsoftc.xpt_highpower_lock);
 5222 
 5223                         mtx_lock(&device->sim->devq->send_mtx);
 5224                         xpt_release_devq_device(device,
 5225                                          /*count*/1, /*runqueue*/TRUE);
 5226                         mtx_unlock(&device->sim->devq->send_mtx);
 5227                 } else
 5228                         mtx_unlock(&xsoftc.xpt_highpower_lock);
 5229         }
 5230 
 5231         sim = ccb_h->path->bus->sim;
 5232 
 5233         if (ccb_h->status & CAM_RELEASE_SIMQ) {
 5234                 xpt_release_simq(sim, /*run_queue*/FALSE);
 5235                 ccb_h->status &= ~CAM_RELEASE_SIMQ;
 5236         }
 5237 
 5238         if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 5239          && (ccb_h->status & CAM_DEV_QFRZN)) {
 5240                 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
 5241                 ccb_h->status &= ~CAM_DEV_QFRZN;
 5242         }
 5243 
 5244         devq = sim->devq;
 5245         if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 5246                 struct cam_ed *dev = ccb_h->path->device;
 5247 
 5248                 mtx_lock(&devq->send_mtx);
 5249                 devq->send_active--;
 5250                 devq->send_openings++;
 5251                 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 5252 
 5253                 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 5254                   && (dev->ccbq.dev_active == 0))) {
 5255                         dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
 5256                         xpt_release_devq_device(dev, /*count*/1,
 5257                                          /*run_queue*/FALSE);
 5258                 }
 5259 
 5260                 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 5261                   && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
 5262                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 5263                         xpt_release_devq_device(dev, /*count*/1,
 5264                                          /*run_queue*/FALSE);
 5265                 }
 5266 
 5267                 if (!device_is_queued(dev))
 5268                         (void)xpt_schedule_devq(devq, dev);
 5269                 xpt_run_devq(devq);
 5270                 mtx_unlock(&devq->send_mtx);
 5271 
 5272                 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
 5273                         mtx = xpt_path_mtx(ccb_h->path);
 5274                         mtx_lock(mtx);
 5275 
 5276                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 5277                          && (--dev->tag_delay_count == 0))
 5278                                 xpt_start_tags(ccb_h->path);
 5279                 }
 5280         }
 5281 
 5282         if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
 5283                 if (mtx == NULL) {
 5284                         mtx = xpt_path_mtx(ccb_h->path);
 5285                         mtx_lock(mtx);
 5286                 }
 5287         } else {
 5288                 if (mtx != NULL) {
 5289                         mtx_unlock(mtx);
 5290                         mtx = NULL;
 5291                 }
 5292         }
 5293 
 5294         /* Call the peripheral driver's callback */
 5295         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 5296         (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 5297         if (mtx != NULL)
 5298                 mtx_unlock(mtx);
 5299 }
 5300 
 5301 void
 5302 xpt_done_td(void *arg)
 5303 {
 5304         struct cam_doneq *queue = arg;
 5305         struct ccb_hdr *ccb_h;
 5306         STAILQ_HEAD(, ccb_hdr)  doneq;
 5307 
 5308         STAILQ_INIT(&doneq);
 5309         mtx_lock(&queue->cam_doneq_mtx);
 5310         while (1) {
 5311                 while (STAILQ_EMPTY(&queue->cam_doneq)) {
 5312                         queue->cam_doneq_sleep = 1;
 5313                         msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
 5314                             PRIBIO, "-", 0);
 5315                         queue->cam_doneq_sleep = 0;
 5316                 }
 5317                 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
 5318                 mtx_unlock(&queue->cam_doneq_mtx);
 5319 
 5320                 THREAD_NO_SLEEPING();
 5321                 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
 5322                         STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
 5323                         xpt_done_process(ccb_h);
 5324                 }
 5325                 THREAD_SLEEPING_OK();
 5326 
 5327                 mtx_lock(&queue->cam_doneq_mtx);
 5328         }
 5329 }
 5330 
 5331 static void
 5332 camisr_runqueue(void)
 5333 {
 5334         struct  ccb_hdr *ccb_h;
 5335         struct cam_doneq *queue;
 5336         int i;
 5337 
 5338         /* Process global queues. */
 5339         for (i = 0; i < cam_num_doneqs; i++) {
 5340                 queue = &cam_doneqs[i];
 5341                 mtx_lock(&queue->cam_doneq_mtx);
 5342                 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
 5343                         STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
 5344                         mtx_unlock(&queue->cam_doneq_mtx);
 5345                         xpt_done_process(ccb_h);
 5346                         mtx_lock(&queue->cam_doneq_mtx);
 5347                 }
 5348                 mtx_unlock(&queue->cam_doneq_mtx);
 5349         }
 5350 }
