FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c

    1 /*-
    2  * Implementation of the Common Access Method Transport (XPT) layer.
    3  *
    4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
    5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/10.1/sys/cam/cam_xpt.c 265635 2014-05-08 07:01:54Z mav $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/systm.h>
   36 #include <sys/types.h>
   37 #include <sys/malloc.h>
   38 #include <sys/kernel.h>
   39 #include <sys/time.h>
   40 #include <sys/conf.h>
   41 #include <sys/fcntl.h>
   42 #include <sys/interrupt.h>
   43 #include <sys/proc.h>
   44 #include <sys/sbuf.h>
   45 #include <sys/smp.h>
   46 #include <sys/taskqueue.h>
   47 
   48 #include <sys/lock.h>
   49 #include <sys/mutex.h>
   50 #include <sys/sysctl.h>
   51 #include <sys/kthread.h>
   52 
   53 #include <cam/cam.h>
   54 #include <cam/cam_ccb.h>
   55 #include <cam/cam_periph.h>
   56 #include <cam/cam_queue.h>
   57 #include <cam/cam_sim.h>
   58 #include <cam/cam_xpt.h>
   59 #include <cam/cam_xpt_sim.h>
   60 #include <cam/cam_xpt_periph.h>
   61 #include <cam/cam_xpt_internal.h>
   62 #include <cam/cam_debug.h>
   63 #include <cam/cam_compat.h>
   64 
   65 #include <cam/scsi/scsi_all.h>
   66 #include <cam/scsi/scsi_message.h>
   67 #include <cam/scsi/scsi_pass.h>
   68 
   69 #include <machine/md_var.h>     /* geometry translation */
   70 #include <machine/stdarg.h>     /* for xpt_print below */
   71 
   72 #include "opt_cam.h"
   73 
   74 /*
   75  * This is the maximum number of high powered commands (e.g. start unit)
   76  * that can be outstanding at a particular time.
   77  */
   78 #ifndef CAM_MAX_HIGHPOWER
   79 #define CAM_MAX_HIGHPOWER  4
   80 #endif
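
/*
 * Illustrative build-time override (hedged: a hypothetical kernel config
 * fragment; because the #ifndef above only supplies a default, a plain
 * -DCAM_MAX_HIGHPOWER=8 in CFLAGS has the same effect):
 *
 *	options 	CAM_MAX_HIGHPOWER=8
 */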
   81 
    82 /* Data structures internal to the xpt layer */
   83 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
   84 MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
   85 MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
   86 MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");
   87 
    88 /* Object for deferring XPT actions to a taskqueue */
   89 struct xpt_task {
   90         struct task     task;
   91         void            *data1;
   92         uintptr_t       data2;
   93 };
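
/*
 * Minimal deferral sketch (hedged: xpt_example_task, some_pointer and
 * some_integer are placeholders; TASK_INIT() and taskqueue_enqueue() are
 * the stock taskqueue(9) interface this object is meant to ride on):
 *
 *	struct xpt_task *task;
 *
 *	task = malloc(sizeof(*task), M_CAMXPT, M_NOWAIT);
 *	if (task != NULL) {
 *		TASK_INIT(&task->task, 0, xpt_example_task, task);
 *		task->data1 = some_pointer;
 *		task->data2 = some_integer;
 *		taskqueue_enqueue(taskqueue_thread, &task->task);
 *	}
 */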
   94 
   95 struct xpt_softc {
   96         /* number of high powered commands that can go through right now */
   97         struct mtx              xpt_highpower_lock;
   98         STAILQ_HEAD(highpowerlist, cam_ed)      highpowerq;
   99         int                     num_highpower;
  100 
  101         /* queue for handling async rescan requests. */
  102         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
  103         int buses_to_config;
  104         int buses_config_done;
  105 
  106         /* Registered busses */
  107         TAILQ_HEAD(,cam_eb)     xpt_busses;
  108         u_int                   bus_generation;
  109 
  110         struct intr_config_hook *xpt_config_hook;
  111 
  112         int                     boot_delay;
  113         struct callout          boot_callout;
  114 
  115         struct mtx              xpt_topo_lock;
  116         struct mtx              xpt_lock;
  117         struct taskqueue        *xpt_taskq;
  118 };
  119 
  120 typedef enum {
  121         DM_RET_COPY             = 0x01,
  122         DM_RET_FLAG_MASK        = 0x0f,
  123         DM_RET_NONE             = 0x00,
  124         DM_RET_STOP             = 0x10,
  125         DM_RET_DESCEND          = 0x20,
  126         DM_RET_ERROR            = 0x30,
  127         DM_RET_ACTION_MASK      = 0xf0
  128 } dev_match_ret;
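
/*
 * Decoding note: the low nibble carries flags and the high nibble an
 * action, tested with the two masks above.  The match functions below
 * use exactly this idiom:
 *
 *	if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 *		(keep walking down the EDT)
 *	if ((retval & DM_RET_FLAG_MASK) & DM_RET_COPY)
 *		(copy this node out to the caller)
 */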
  129 
  130 typedef enum {
  131         XPT_DEPTH_BUS,
  132         XPT_DEPTH_TARGET,
  133         XPT_DEPTH_DEVICE,
  134         XPT_DEPTH_PERIPH
  135 } xpt_traverse_depth;
  136 
  137 struct xpt_traverse_config {
  138         xpt_traverse_depth      depth;
  139         void                    *tr_func;
  140         void                    *tr_arg;
  141 };
  142 
  143 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
  144 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
  145 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
  146 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
  147 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
  148 
  149 /* Transport layer configuration information */
  150 static struct xpt_softc xsoftc;
  151 
  152 TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
  153 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
  154            &xsoftc.boot_delay, 0, "Bus registration wait time");
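
/*
 * Being CTLFLAG_RDTUN, this knob is set from the loader, e.g. in
 * /boot/loader.conf (illustrative value; this version interprets it in
 * milliseconds):
 *
 *	kern.cam.boot_delay="10000"
 */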
  155 
  156 struct cam_doneq {
  157         struct mtx_padalign     cam_doneq_mtx;
  158         STAILQ_HEAD(, ccb_hdr)  cam_doneq;
  159         int                     cam_doneq_sleep;
  160 };
  161 
  162 static struct cam_doneq cam_doneqs[MAXCPU];
  163 static int cam_num_doneqs;
  164 static struct proc *cam_proc;
  165 
  166 TUNABLE_INT("kern.cam.num_doneqs", &cam_num_doneqs);
  167 SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
  168            &cam_num_doneqs, 0, "Number of completion queues/threads");
  169 
  170 struct cam_periph *xpt_periph;
  171 
  172 static periph_init_t xpt_periph_init;
  173 
  174 static struct periph_driver xpt_driver =
  175 {
  176         xpt_periph_init, "xpt",
  177         TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
  178         CAM_PERIPH_DRV_EARLY
  179 };
  180 
  181 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
  182 
  183 static d_open_t xptopen;
  184 static d_close_t xptclose;
  185 static d_ioctl_t xptioctl;
  186 static d_ioctl_t xptdoioctl;
  187 
  188 static struct cdevsw xpt_cdevsw = {
  189         .d_version =    D_VERSION,
  190         .d_flags =      0,
  191         .d_open =       xptopen,
  192         .d_close =      xptclose,
  193         .d_ioctl =      xptioctl,
  194         .d_name =       "xpt",
  195 };
  196 
   197 /* Storage for debugging data structures */
  198 struct cam_path *cam_dpath;
  199 u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
  200 TUNABLE_INT("kern.cam.dflags", &cam_dflags);
  201 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
  202         &cam_dflags, 0, "Enabled debug flags");
  203 u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
  204 TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
  205 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
  206         &cam_debug_delay, 0, "Delay in us after each debug message");
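
/*
 * Both knobs are CTLFLAG_RW, so tracing can be adjusted on a live
 * system (illustrative values; flag bits are defined in cam/cam_debug.h
 * and generally need a kernel built with options CAMDEBUG):
 *
 *	sysctl kern.cam.dflags=0x1
 *	sysctl kern.cam.debug_delay=1000	(pause 1000us per message)
 */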
  207 
  208 /* Our boot-time initialization hook */
  209 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
  210 
  211 static moduledata_t cam_moduledata = {
  212         "cam",
  213         cam_module_event_handler,
  214         NULL
  215 };
  216 
  217 static int      xpt_init(void *);
  218 
  219 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
  220 MODULE_VERSION(cam, 1);
  221 
  222 
  223 static void             xpt_async_bcast(struct async_list *async_head,
  224                                         u_int32_t async_code,
  225                                         struct cam_path *path,
  226                                         void *async_arg);
  227 static path_id_t xptnextfreepathid(void);
  228 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
  229 static union ccb *xpt_get_ccb(struct cam_periph *periph);
  230 static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
  231 static void      xpt_run_allocq(struct cam_periph *periph, int sleep);
  232 static void      xpt_run_allocq_task(void *context, int pending);
  233 static void      xpt_run_devq(struct cam_devq *devq);
  234 static timeout_t xpt_release_devq_timeout;
  235 static void      xpt_release_simq_timeout(void *arg) __unused;
  236 static void      xpt_acquire_bus(struct cam_eb *bus);
  237 static void      xpt_release_bus(struct cam_eb *bus);
  238 static uint32_t  xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
  239 static int       xpt_release_devq_device(struct cam_ed *dev, u_int count,
  240                     int run_queue);
  241 static struct cam_et*
  242                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
  243 static void      xpt_acquire_target(struct cam_et *target);
  244 static void      xpt_release_target(struct cam_et *target);
  245 static struct cam_eb*
  246                  xpt_find_bus(path_id_t path_id);
  247 static struct cam_et*
  248                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
  249 static struct cam_ed*
  250                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
  251 static void      xpt_config(void *arg);
  252 static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
  253                                  u_int32_t new_priority);
  254 static xpt_devicefunc_t xptpassannouncefunc;
  255 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
  256 static void      xptpoll(struct cam_sim *sim);
  257 static void      camisr_runqueue(void);
  258 static void      xpt_done_process(struct ccb_hdr *ccb_h);
  259 static void      xpt_done_td(void *);
  260 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
  261                                     u_int num_patterns, struct cam_eb *bus);
  262 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
  263                                        u_int num_patterns,
  264                                        struct cam_ed *device);
  265 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
  266                                        u_int num_patterns,
  267                                        struct cam_periph *periph);
  268 static xpt_busfunc_t    xptedtbusfunc;
  269 static xpt_targetfunc_t xptedttargetfunc;
  270 static xpt_devicefunc_t xptedtdevicefunc;
  271 static xpt_periphfunc_t xptedtperiphfunc;
  272 static xpt_pdrvfunc_t   xptplistpdrvfunc;
  273 static xpt_periphfunc_t xptplistperiphfunc;
  274 static int              xptedtmatch(struct ccb_dev_match *cdm);
  275 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
  276 static int              xptbustraverse(struct cam_eb *start_bus,
  277                                        xpt_busfunc_t *tr_func, void *arg);
  278 static int              xpttargettraverse(struct cam_eb *bus,
  279                                           struct cam_et *start_target,
  280                                           xpt_targetfunc_t *tr_func, void *arg);
  281 static int              xptdevicetraverse(struct cam_et *target,
  282                                           struct cam_ed *start_device,
  283                                           xpt_devicefunc_t *tr_func, void *arg);
  284 static int              xptperiphtraverse(struct cam_ed *device,
  285                                           struct cam_periph *start_periph,
  286                                           xpt_periphfunc_t *tr_func, void *arg);
  287 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
  288                                         xpt_pdrvfunc_t *tr_func, void *arg);
  289 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
  290                                             struct cam_periph *start_periph,
  291                                             xpt_periphfunc_t *tr_func,
  292                                             void *arg);
  293 static xpt_busfunc_t    xptdefbusfunc;
  294 static xpt_targetfunc_t xptdeftargetfunc;
  295 static xpt_devicefunc_t xptdefdevicefunc;
  296 static xpt_periphfunc_t xptdefperiphfunc;
  297 static void             xpt_finishconfig_task(void *context, int pending);
  298 static void             xpt_dev_async_default(u_int32_t async_code,
  299                                               struct cam_eb *bus,
  300                                               struct cam_et *target,
  301                                               struct cam_ed *device,
  302                                               void *async_arg);
  303 static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
  304                                                  struct cam_et *target,
  305                                                  lun_id_t lun_id);
  306 static xpt_devicefunc_t xptsetasyncfunc;
  307 static xpt_busfunc_t    xptsetasyncbusfunc;
  308 static cam_status       xptregister(struct cam_periph *periph,
  309                                     void *arg);
  310 static __inline int device_is_queued(struct cam_ed *device);
  311 
  312 static __inline int
  313 xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
  314 {
  315         int     retval;
  316 
  317         mtx_assert(&devq->send_mtx, MA_OWNED);
  318         if ((dev->ccbq.queue.entries > 0) &&
  319             (dev->ccbq.dev_openings > 0) &&
  320             (dev->ccbq.queue.qfrozen_cnt == 0)) {
  321                 /*
  322                  * The priority of a device waiting for controller
  323                  * resources is that of the highest priority CCB
  324                  * enqueued.
  325                  */
  326                 retval =
  327                     xpt_schedule_dev(&devq->send_queue,
  328                                      &dev->devq_entry,
  329                                      CAMQ_GET_PRIO(&dev->ccbq.queue));
  330         } else {
  331                 retval = 0;
  332         }
  333         return (retval);
  334 }
  335 
  336 static __inline int
  337 device_is_queued(struct cam_ed *device)
  338 {
  339         return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
  340 }
  341 
  342 static void
   343 xpt_periph_init(void)
  344 {
  345         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
  346 }
  347 
  348 static int
  349 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
  350 {
  351 
  352         /*
  353          * Only allow read-write access.
  354          */
  355         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
  356                 return(EPERM);
  357 
  358         /*
  359          * We don't allow nonblocking access.
  360          */
  361         if ((flags & O_NONBLOCK) != 0) {
  362                 printf("%s: can't do nonblocking access\n", devtoname(dev));
  363                 return(ENODEV);
  364         }
  365 
  366         return(0);
  367 }
  368 
  369 static int
  370 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
  371 {
  372 
  373         return(0);
  374 }
  375 
  376 /*
  377  * Don't automatically grab the xpt softc lock here even though this is going
  378  * through the xpt device.  The xpt device is really just a back door for
  379  * accessing other devices and SIMs, so the right thing to do is to grab
  380  * the appropriate SIM lock once the bus/SIM is located.
  381  */
  382 static int
  383 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
  384 {
  385         int error;
  386 
  387         if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
  388                 error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
  389         }
  390         return (error);
  391 }
  392         
  393 static int
  394 xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
  395 {
  396         int error;
  397 
  398         error = 0;
  399 
  400         switch(cmd) {
  401         /*
  402          * For the transport layer CAMIOCOMMAND ioctl, we really only want
  403          * to accept CCB types that don't quite make sense to send through a
  404          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
  405          * in the CAM spec.
  406          */
  407         case CAMIOCOMMAND: {
  408                 union ccb *ccb;
  409                 union ccb *inccb;
  410                 struct cam_eb *bus;
  411 
  412                 inccb = (union ccb *)addr;
  413 
  414                 bus = xpt_find_bus(inccb->ccb_h.path_id);
  415                 if (bus == NULL)
  416                         return (EINVAL);
  417 
  418                 switch (inccb->ccb_h.func_code) {
  419                 case XPT_SCAN_BUS:
  420                 case XPT_RESET_BUS:
  421                         if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
  422                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
  423                                 xpt_release_bus(bus);
  424                                 return (EINVAL);
  425                         }
  426                         break;
  427                 case XPT_SCAN_TGT:
  428                         if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
  429                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
  430                                 xpt_release_bus(bus);
  431                                 return (EINVAL);
  432                         }
  433                         break;
  434                 default:
  435                         break;
  436                 }
  437 
  438                 switch(inccb->ccb_h.func_code) {
  439                 case XPT_SCAN_BUS:
  440                 case XPT_RESET_BUS:
  441                 case XPT_PATH_INQ:
  442                 case XPT_ENG_INQ:
  443                 case XPT_SCAN_LUN:
  444                 case XPT_SCAN_TGT:
  445 
  446                         ccb = xpt_alloc_ccb();
  447 
  448                         /*
  449                          * Create a path using the bus, target, and lun the
  450                          * user passed in.
  451                          */
  452                         if (xpt_create_path(&ccb->ccb_h.path, NULL,
  453                                             inccb->ccb_h.path_id,
  454                                             inccb->ccb_h.target_id,
  455                                             inccb->ccb_h.target_lun) !=
  456                                             CAM_REQ_CMP){
  457                                 error = EINVAL;
  458                                 xpt_free_ccb(ccb);
  459                                 break;
  460                         }
  461                         /* Ensure all of our fields are correct */
  462                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
  463                                       inccb->ccb_h.pinfo.priority);
  464                         xpt_merge_ccb(ccb, inccb);
  465                         xpt_path_lock(ccb->ccb_h.path);
  466                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
  467                         xpt_path_unlock(ccb->ccb_h.path);
  468                         bcopy(ccb, inccb, sizeof(union ccb));
  469                         xpt_free_path(ccb->ccb_h.path);
  470                         xpt_free_ccb(ccb);
  471                         break;
  472 
  473                 case XPT_DEBUG: {
  474                         union ccb ccb;
  475 
  476                         /*
  477                          * This is an immediate CCB, so it's okay to
  478                          * allocate it on the stack.
  479                          */
  480 
  481                         /*
  482                          * Create a path using the bus, target, and lun the
  483                          * user passed in.
  484                          */
  485                         if (xpt_create_path(&ccb.ccb_h.path, NULL,
  486                                             inccb->ccb_h.path_id,
  487                                             inccb->ccb_h.target_id,
  488                                             inccb->ccb_h.target_lun) !=
  489                                             CAM_REQ_CMP){
  490                                 error = EINVAL;
  491                                 break;
  492                         }
  493                         /* Ensure all of our fields are correct */
  494                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
  495                                       inccb->ccb_h.pinfo.priority);
  496                         xpt_merge_ccb(&ccb, inccb);
  497                         xpt_action(&ccb);
  498                         bcopy(&ccb, inccb, sizeof(union ccb));
  499                         xpt_free_path(ccb.ccb_h.path);
  500                         break;
  501 
  502                 }
  503                 case XPT_DEV_MATCH: {
  504                         struct cam_periph_map_info mapinfo;
  505                         struct cam_path *old_path;
  506 
  507                         /*
  508                          * We can't deal with physical addresses for this
  509                          * type of transaction.
  510                          */
  511                         if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
  512                             CAM_DATA_VADDR) {
  513                                 error = EINVAL;
  514                                 break;
  515                         }
  516 
  517                         /*
  518                          * Save this in case the caller had it set to
  519                          * something in particular.
  520                          */
  521                         old_path = inccb->ccb_h.path;
  522 
  523                         /*
  524                          * We really don't need a path for the matching
  525                          * code.  The path is needed because of the
  526                          * debugging statements in xpt_action().  They
  527                          * assume that the CCB has a valid path.
  528                          */
  529                         inccb->ccb_h.path = xpt_periph->path;
  530 
  531                         bzero(&mapinfo, sizeof(mapinfo));
  532 
  533                         /*
  534                          * Map the pattern and match buffers into kernel
  535                          * virtual address space.
  536                          */
  537                         error = cam_periph_mapmem(inccb, &mapinfo);
  538 
  539                         if (error) {
  540                                 inccb->ccb_h.path = old_path;
  541                                 break;
  542                         }
  543 
  544                         /*
  545                          * This is an immediate CCB, we can send it on directly.
  546                          */
  547                         xpt_action(inccb);
  548 
  549                         /*
  550                          * Map the buffers back into user space.
  551                          */
  552                         cam_periph_unmapmem(inccb, &mapinfo);
  553 
  554                         inccb->ccb_h.path = old_path;
  555 
  556                         error = 0;
  557                         break;
  558                 }
  559                 default:
  560                         error = ENOTSUP;
  561                         break;
  562                 }
  563                 xpt_release_bus(bus);
  564                 break;
  565         }
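
        /*
         * Hedged userland sketch of driving the CAMIOCOMMAND path above
         * with an immediate XPT_PATH_INQ CCB (assumes a descriptor fd
         * open on /dev/xpt0 and the cam/cam_ccb.h definitions; error
         * and status-mask handling elided):
         *
         *	union ccb ccb;
         *
         *	bzero(&ccb, sizeof(ccb));
         *	ccb.ccb_h.func_code = XPT_PATH_INQ;
         *	ccb.ccb_h.path_id = 0;
         *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
         *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
         *	if (ioctl(fd, CAMIOCOMMAND, &ccb) == 0 &&
         *	    ccb.ccb_h.status == CAM_REQ_CMP)
         *		printf("HBA: %.16s\n", ccb.cpi.hba_vid);
         */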
   566         /*
   567          * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as
   568          * input, with the peripheral driver name and unit number filled
   569          * in.  The other fields don't really matter as input.  The
   570          * passthrough driver name ("pass") and unit number are passed
   571          * back in the ccb.  The current device generation number, the
   572          * index into the device peripheral driver list, and the status
   573          * are also passed back.  Note that since we do everything in one
   574          * pass, unlike the XPT_GDEVLIST ccb, we never return a status of
   575          * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be)
   576          * impossible for the peripheral driver list to change, since we
   577          * look at the whole thing in one pass and do it with lock
   578          * protection.
   579          */
  580         case CAMGETPASSTHRU: {
  581                 union ccb *ccb;
  582                 struct cam_periph *periph;
  583                 struct periph_driver **p_drv;
  584                 char   *name;
  585                 u_int unit;
  586                 int base_periph_found;
  587 
  588                 ccb = (union ccb *)addr;
  589                 unit = ccb->cgdl.unit_number;
  590                 name = ccb->cgdl.periph_name;
  591                 base_periph_found = 0;
  592 
  593                 /*
  594                  * Sanity check -- make sure we don't get a null peripheral
  595                  * driver name.
  596                  */
  597                 if (*ccb->cgdl.periph_name == '\0') {
  598                         error = EINVAL;
  599                         break;
  600                 }
  601 
  602                 /* Keep the list from changing while we traverse it */
  603                 xpt_lock_buses();
  604 
  605                 /* first find our driver in the list of drivers */
  606                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
  607                         if (strcmp((*p_drv)->driver_name, name) == 0)
  608                                 break;
  609 
  610                 if (*p_drv == NULL) {
  611                         xpt_unlock_buses();
  612                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  613                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  614                         *ccb->cgdl.periph_name = '\0';
  615                         ccb->cgdl.unit_number = 0;
  616                         error = ENOENT;
  617                         break;
  618                 }
  619 
  620                 /*
  621                  * Run through every peripheral instance of this driver
  622                  * and check to see whether it matches the unit passed
  623                  * in by the user.  If it does, get out of the loops and
  624                  * find the passthrough driver associated with that
  625                  * peripheral driver.
  626                  */
  627                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
  628                      periph = TAILQ_NEXT(periph, unit_links)) {
  629 
  630                         if (periph->unit_number == unit)
  631                                 break;
  632                 }
  633                 /*
  634                  * If we found the peripheral driver that the user passed
  635                  * in, go through all of the peripheral drivers for that
  636                  * particular device and look for a passthrough driver.
  637                  */
  638                 if (periph != NULL) {
  639                         struct cam_ed *device;
  640                         int i;
  641 
  642                         base_periph_found = 1;
  643                         device = periph->path->device;
  644                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
  645                              periph != NULL;
  646                              periph = SLIST_NEXT(periph, periph_links), i++) {
  647                                 /*
  648                                  * Check to see whether we have a
  649                                  * passthrough device or not.
  650                                  */
  651                                 if (strcmp(periph->periph_name, "pass") == 0) {
  652                                         /*
  653                                          * Fill in the getdevlist fields.
  654                                          */
  655                                         strcpy(ccb->cgdl.periph_name,
  656                                                periph->periph_name);
  657                                         ccb->cgdl.unit_number =
  658                                                 periph->unit_number;
  659                                         if (SLIST_NEXT(periph, periph_links))
  660                                                 ccb->cgdl.status =
  661                                                         CAM_GDEVLIST_MORE_DEVS;
  662                                         else
  663                                                 ccb->cgdl.status =
  664                                                        CAM_GDEVLIST_LAST_DEVICE;
  665                                         ccb->cgdl.generation =
  666                                                 device->generation;
  667                                         ccb->cgdl.index = i;
  668                                         /*
  669                                          * Fill in some CCB header fields
  670                                          * that the user may want.
  671                                          */
  672                                         ccb->ccb_h.path_id =
  673                                                 periph->path->bus->path_id;
  674                                         ccb->ccb_h.target_id =
  675                                                 periph->path->target->target_id;
  676                                         ccb->ccb_h.target_lun =
  677                                                 periph->path->device->lun_id;
  678                                         ccb->ccb_h.status = CAM_REQ_CMP;
  679                                         break;
  680                                 }
  681                         }
  682                 }
  683 
  684                 /*
  685                  * If the periph is null here, one of two things has
  686                  * happened.  The first possibility is that we couldn't
  687                  * find the unit number of the particular peripheral driver
  688                  * that the user is asking about.  e.g. the user asks for
  689                  * the passthrough driver for "da11".  We find the list of
  690                  * "da" peripherals all right, but there is no unit 11.
  691                  * The other possibility is that we went through the list
  692                  * of peripheral drivers attached to the device structure,
  693                  * but didn't find one with the name "pass".  Either way,
  694                  * we return ENOENT, since we couldn't find something.
  695                  */
  696                 if (periph == NULL) {
  697                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  698                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  699                         *ccb->cgdl.periph_name = '\0';
  700                         ccb->cgdl.unit_number = 0;
  701                         error = ENOENT;
  702                         /*
  703                          * It is unfortunate that this is even necessary,
  704                          * but there are many, many clueless users out there.
  705                          * If this is true, the user is looking for the
  706                          * passthrough driver, but doesn't have one in his
  707                          * kernel.
  708                          */
  709                         if (base_periph_found == 1) {
  710                                 printf("xptioctl: pass driver is not in the "
  711                                        "kernel\n");
  712                                 printf("xptioctl: put \"device pass\" in "
  713                                        "your kernel config file\n");
  714                         }
  715                 }
  716                 xpt_unlock_buses();
  717                 break;
  718                 }
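
        /*
         * Hedged userland sketch of the lookup above: find the "pass"
         * sibling of da0 (roughly what libcam does on behalf of
         * cam_open_device(); error handling elided):
         *
         *	union ccb ccb;
         *
         *	bzero(&ccb, sizeof(ccb));
         *	strlcpy(ccb.cgdl.periph_name, "da",
         *	    sizeof(ccb.cgdl.periph_name));
         *	ccb.cgdl.unit_number = 0;
         *	if (ioctl(xpt_fd, CAMGETPASSTHRU, &ccb) == 0 &&
         *	    ccb.ccb_h.status == CAM_REQ_CMP)
         *		printf("%s%d\n", ccb.cgdl.periph_name,
         *		    ccb.cgdl.unit_number);
         */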
  719         default:
  720                 error = ENOTTY;
  721                 break;
  722         }
  723 
  724         return(error);
  725 }
  726 
  727 static int
  728 cam_module_event_handler(module_t mod, int what, void *arg)
  729 {
  730         int error;
  731 
  732         switch (what) {
  733         case MOD_LOAD:
  734                 if ((error = xpt_init(NULL)) != 0)
  735                         return (error);
  736                 break;
  737         case MOD_UNLOAD:
  738                 return EBUSY;
  739         default:
  740                 return EOPNOTSUPP;
  741         }
  742 
  743         return 0;
  744 }
  745 
  746 static void
  747 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
  748 {
  749 
  750         if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
  751                 xpt_free_path(done_ccb->ccb_h.path);
  752                 xpt_free_ccb(done_ccb);
  753         } else {
  754                 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
  755                 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
  756         }
  757         xpt_release_boot();
  758 }
  759 
  760 /* thread to handle bus rescans */
  761 static void
  762 xpt_scanner_thread(void *dummy)
  763 {
  764         union ccb       *ccb;
  765         struct cam_path  path;
  766 
  767         xpt_lock_buses();
  768         for (;;) {
  769                 if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
  770                         msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
  771                                "-", 0);
  772                 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
  773                         TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  774                         xpt_unlock_buses();
  775 
   776                         /*
   777                          * Since the lock can be dropped and the path
   778                          * freed by the completion callback even before
   779                          * xpt_action() returns, take our own copy of
   780                          * the path for reference.
   781                          */
  781                         xpt_copy_path(&path, ccb->ccb_h.path);
  782                         xpt_path_lock(&path);
  783                         xpt_action(ccb);
  784                         xpt_path_unlock(&path);
  785                         xpt_release_path(&path);
  786 
  787                         xpt_lock_buses();
  788                 }
  789         }
  790 }
  791 
  792 void
  793 xpt_rescan(union ccb *ccb)
  794 {
  795         struct ccb_hdr *hdr;
  796 
  797         /* Prepare request */
  798         if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
  799             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
  800                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
  801         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
  802             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
  803                 ccb->ccb_h.func_code = XPT_SCAN_TGT;
  804         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
  805             ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
  806                 ccb->ccb_h.func_code = XPT_SCAN_LUN;
  807         else {
  808                 xpt_print(ccb->ccb_h.path, "illegal scan path\n");
  809                 xpt_free_path(ccb->ccb_h.path);
  810                 xpt_free_ccb(ccb);
  811                 return;
  812         }
  813         ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
  814         ccb->ccb_h.cbfcnp = xpt_rescan_done;
  815         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
  816         /* Don't make duplicate entries for the same paths. */
  817         xpt_lock_buses();
  818         if (ccb->ccb_h.ppriv_ptr1 == NULL) {
  819                 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
  820                         if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
  821                                 wakeup(&xsoftc.ccb_scanq);
  822                                 xpt_unlock_buses();
  823                                 xpt_print(ccb->ccb_h.path, "rescan already queued\n");
  824                                 xpt_free_path(ccb->ccb_h.path);
  825                                 xpt_free_ccb(ccb);
  826                                 return;
  827                         }
  828                 }
  829         }
  830         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  831         xsoftc.buses_to_config++;
  832         wakeup(&xsoftc.ccb_scanq);
  833         xpt_unlock_buses();
  834 }
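
/*
 * Typical caller sketch (hedged; mirrors how SIM drivers request bus
 * rescans).  On failure xpt_rescan() frees both the ccb and the path
 * itself, so the caller hands off ownership unconditionally:
 *
 *	union ccb *ccb = xpt_alloc_ccb_nowait();
 *
 *	if (ccb != NULL) {
 *		if (xpt_create_path(&ccb->ccb_h.path, NULL, bus_path_id,
 *		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) == CAM_REQ_CMP)
 *			xpt_rescan(ccb);
 *		else
 *			xpt_free_ccb(ccb);
 *	}
 */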
  835 
  836 /* Functions accessed by the peripheral drivers */
  837 static int
  838 xpt_init(void *dummy)
  839 {
  840         struct cam_sim *xpt_sim;
  841         struct cam_path *path;
  842         struct cam_devq *devq;
  843         cam_status status;
  844         int error, i;
  845 
  846         TAILQ_INIT(&xsoftc.xpt_busses);
  847         TAILQ_INIT(&xsoftc.ccb_scanq);
  848         STAILQ_INIT(&xsoftc.highpowerq);
  849         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
  850 
  851         mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
  852         mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
  853         mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
  854         xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
  855             taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);
  856 
  857 #ifdef CAM_BOOT_DELAY
  858         /*
  859          * Override this value at compile time to assist our users
  860          * who don't use loader to boot a kernel.
  861          */
  862         xsoftc.boot_delay = CAM_BOOT_DELAY;
  863 #endif
  864         /*
  865          * The xpt layer is, itself, the equivelent of a SIM.
  866          * Allow 16 ccbs in the ccb pool for it.  This should
  867          * give decent parallelism when we probe busses and
  868          * perform other XPT functions.
  869          */
  870         devq = cam_simq_alloc(16);
  871         xpt_sim = cam_sim_alloc(xptaction,
  872                                 xptpoll,
  873                                 "xpt",
  874                                 /*softc*/NULL,
  875                                 /*unit*/0,
  876                                 /*mtx*/&xsoftc.xpt_lock,
  877                                 /*max_dev_transactions*/0,
  878                                 /*max_tagged_dev_transactions*/0,
  879                                 devq);
  880         if (xpt_sim == NULL)
  881                 return (ENOMEM);
  882 
  883         mtx_lock(&xsoftc.xpt_lock);
  884         if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
  885                 mtx_unlock(&xsoftc.xpt_lock);
  886                 printf("xpt_init: xpt_bus_register failed with status %#x,"
  887                        " failing attach\n", status);
  888                 return (EINVAL);
  889         }
  890         mtx_unlock(&xsoftc.xpt_lock);
  891 
  892         /*
  893          * Looking at the XPT from the SIM layer, the XPT is
   894          * the equivalent of a peripheral driver.  Allocate
  895          * a peripheral driver entry for us.
  896          */
  897         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
  898                                       CAM_TARGET_WILDCARD,
  899                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
  901                 printf("xpt_init: xpt_create_path failed with status %#x,"
  902                        " failing attach\n", status);
  903                 return (EINVAL);
  904         }
  905         xpt_path_lock(path);
  906         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
  907                          path, NULL, 0, xpt_sim);
  908         xpt_path_unlock(path);
  909         xpt_free_path(path);
  910 
  911         if (cam_num_doneqs < 1)
  912                 cam_num_doneqs = 1 + mp_ncpus / 6;
  913         else if (cam_num_doneqs > MAXCPU)
  914                 cam_num_doneqs = MAXCPU;
  915         for (i = 0; i < cam_num_doneqs; i++) {
  916                 mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
  917                     MTX_DEF);
  918                 STAILQ_INIT(&cam_doneqs[i].cam_doneq);
  919                 error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
  920                     &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
  921                 if (error != 0) {
  922                         cam_num_doneqs = i;
  923                         break;
  924                 }
  925         }
  926         if (cam_num_doneqs < 1) {
  927                 printf("xpt_init: Cannot init completion queues "
  928                        "- failing attach\n");
  929                 return (ENOMEM);
  930         }
  931         /*
  932          * Register a callback for when interrupts are enabled.
  933          */
  934         xsoftc.xpt_config_hook =
  935             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
  936                                               M_CAMXPT, M_NOWAIT | M_ZERO);
  937         if (xsoftc.xpt_config_hook == NULL) {
  938                 printf("xpt_init: Cannot malloc config hook "
  939                        "- failing attach\n");
  940                 return (ENOMEM);
  941         }
  942         xsoftc.xpt_config_hook->ich_func = xpt_config;
   943         if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
   944                 free(xsoftc.xpt_config_hook, M_CAMXPT);
   945                 printf("xpt_init: config_intrhook_establish failed "
   946                        "- failing attach\n");
                        return (ENOMEM);
   947         }
  948 
  949         return (0);
  950 }
  951 
  952 static cam_status
  953 xptregister(struct cam_periph *periph, void *arg)
  954 {
  955         struct cam_sim *xpt_sim;
  956 
  957         if (periph == NULL) {
  958                 printf("xptregister: periph was NULL!!\n");
  959                 return(CAM_REQ_CMP_ERR);
  960         }
  961 
  962         xpt_sim = (struct cam_sim *)arg;
  963         xpt_sim->softc = periph;
  964         xpt_periph = periph;
  965         periph->softc = NULL;
  966 
  967         return(CAM_REQ_CMP);
  968 }
  969 
  970 int32_t
  971 xpt_add_periph(struct cam_periph *periph)
  972 {
  973         struct cam_ed *device;
  974         int32_t  status;
  975 
  976         TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
  977         device = periph->path->device;
  978         status = CAM_REQ_CMP;
  979         if (device != NULL) {
  980                 mtx_lock(&device->target->bus->eb_mtx);
  981                 device->generation++;
  982                 SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
  983                 mtx_unlock(&device->target->bus->eb_mtx);
  984         }
  985 
  986         return (status);
  987 }
  988 
  989 void
  990 xpt_remove_periph(struct cam_periph *periph)
  991 {
  992         struct cam_ed *device;
  993 
  994         device = periph->path->device;
  995         if (device != NULL) {
  996                 mtx_lock(&device->target->bus->eb_mtx);
  997                 device->generation++;
  998                 SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
  999                 mtx_unlock(&device->target->bus->eb_mtx);
 1000         }
 1001 }
 1002 
 1003 
 1004 void
 1005 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1006 {
 1007         struct  cam_path *path = periph->path;
 1008 
 1009         cam_periph_assert(periph, MA_OWNED);
 1010         periph->flags |= CAM_PERIPH_ANNOUNCED;
 1011 
 1012         printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
 1013                periph->periph_name, periph->unit_number,
 1014                path->bus->sim->sim_name,
 1015                path->bus->sim->unit_number,
 1016                path->bus->sim->bus_id,
 1017                path->bus->path_id,
 1018                path->target->target_id,
 1019                (uintmax_t)path->device->lun_id);
 1020         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1021         if (path->device->protocol == PROTO_SCSI)
 1022                 scsi_print_inquiry(&path->device->inq_data);
 1023         else if (path->device->protocol == PROTO_ATA ||
 1024             path->device->protocol == PROTO_SATAPM)
 1025                 ata_print_ident(&path->device->ident_data);
 1026         else if (path->device->protocol == PROTO_SEMB)
 1027                 semb_print_ident(
 1028                     (struct sep_identify_data *)&path->device->ident_data);
 1029         else
 1030                 printf("Unknown protocol device\n");
 1031         if (path->device->serial_num_len > 0) {
 1032                 /* Don't wrap the screen  - print only the first 60 chars */
 1033                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1034                        periph->unit_number, path->device->serial_num);
 1035         }
 1036         /* Announce transport details. */
 1037         (*(path->bus->xport->announce))(periph);
 1038         /* Announce command queueing. */
 1039         if (path->device->inq_flags & SID_CmdQue
 1040          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1041                 printf("%s%d: Command Queueing enabled\n",
 1042                        periph->periph_name, periph->unit_number);
 1043         }
 1044         /* Announce caller's details if they've passed in. */
 1045         if (announce_string != NULL)
 1046                 printf("%s%d: %s\n", periph->periph_name,
 1047                        periph->unit_number, announce_string);
 1048 }
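
/*
 * For reference, the printf()s above produce boot lines of this general
 * shape (illustrative device, not captured output):
 *
 *	da0 at mps0 bus 0 scbus0 target 1 lun 0
 *	da0: <VENDOR PRODUCT 0001> Fixed Direct Access SPC-4 SCSI device
 *	da0: Serial Number ABC0123456789
 *	da0: Command Queueing enabled
 */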
 1049 
 1050 void
 1051 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
 1052 {
 1053         if (quirks != 0) {
 1054                 printf("%s%d: quirks=0x%b\n", periph->periph_name,
 1055                     periph->unit_number, quirks, bit_string);
 1056         }
 1057 }
 1058 
 1059 void
 1060 xpt_denounce_periph(struct cam_periph *periph)
 1061 {
 1062         struct  cam_path *path = periph->path;
 1063 
 1064         cam_periph_assert(periph, MA_OWNED);
 1065         printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
 1066                periph->periph_name, periph->unit_number,
 1067                path->bus->sim->sim_name,
 1068                path->bus->sim->unit_number,
 1069                path->bus->sim->bus_id,
 1070                path->bus->path_id,
 1071                path->target->target_id,
 1072                (uintmax_t)path->device->lun_id);
 1073         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1074         if (path->device->protocol == PROTO_SCSI)
 1075                 scsi_print_inquiry_short(&path->device->inq_data);
 1076         else if (path->device->protocol == PROTO_ATA ||
 1077             path->device->protocol == PROTO_SATAPM)
 1078                 ata_print_ident_short(&path->device->ident_data);
 1079         else if (path->device->protocol == PROTO_SEMB)
 1080                 semb_print_ident_short(
 1081                     (struct sep_identify_data *)&path->device->ident_data);
 1082         else
 1083                 printf("Unknown protocol device");
 1084         if (path->device->serial_num_len > 0)
 1085                 printf(" s/n %.60s", path->device->serial_num);
 1086         printf(" detached\n");
 1087 }
 1088 
 1089 
 1090 int
 1091 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
 1092 {
 1093         int ret = -1, l;
 1094         struct ccb_dev_advinfo cdai;
 1095         struct scsi_vpd_id_descriptor *idd;
 1096 
 1097         xpt_path_assert(path, MA_OWNED);
 1098 
 1099         memset(&cdai, 0, sizeof(cdai));
 1100         xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 1101         cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 1102         cdai.bufsiz = len;
 1103 
 1104         if (!strcmp(attr, "GEOM::ident"))
 1105                 cdai.buftype = CDAI_TYPE_SERIAL_NUM;
 1106         else if (!strcmp(attr, "GEOM::physpath"))
 1107                 cdai.buftype = CDAI_TYPE_PHYS_PATH;
 1108         else if (strcmp(attr, "GEOM::lunid") == 0 ||
 1109                  strcmp(attr, "GEOM::lunname") == 0) {
 1110                 cdai.buftype = CDAI_TYPE_SCSI_DEVID;
 1111                 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
 1112         } else
 1113                 goto out;
 1114 
 1115         cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
 1116         if (cdai.buf == NULL) {
 1117                 ret = ENOMEM;
 1118                 goto out;
 1119         }
 1120         xpt_action((union ccb *)&cdai); /* can only be synchronous */
 1121         if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
 1122                 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
 1123         if (cdai.provsiz == 0)
 1124                 goto out;
 1125         if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
 1126                 if (strcmp(attr, "GEOM::lunid") == 0) {
 1127                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1128                             cdai.provsiz, scsi_devid_is_lun_naa);
 1129                         if (idd == NULL)
 1130                                 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1131                                     cdai.provsiz, scsi_devid_is_lun_eui64);
 1132                 } else
 1133                         idd = NULL;
 1134                 if (idd == NULL)
 1135                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1136                             cdai.provsiz, scsi_devid_is_lun_t10);
 1137                 if (idd == NULL)
 1138                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1139                             cdai.provsiz, scsi_devid_is_lun_name);
 1140                 if (idd == NULL)
 1141                         goto out;
 1142                 ret = 0;
 1143                 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII ||
 1144                     (idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
 1145                         l = strnlen(idd->identifier, idd->length);
 1146                         if (l < len) {
 1147                                 bcopy(idd->identifier, buf, l);
 1148                                 buf[l] = 0;
 1149                         } else
 1150                                 ret = EFAULT;
 1151                 } else {
 1152                         if (idd->length * 2 < len) {
 1153                                 for (l = 0; l < idd->length; l++)
 1154                                         sprintf(buf + l * 2, "%02x",
 1155                                             idd->identifier[l]);
 1156                         } else
 1157                                 ret = EFAULT;
 1158                 }
 1159         } else {
 1160                 ret = 0;
 1161                 if (strlcpy(buf, cdai.buf, len) >= len)
 1162                         ret = EFAULT;
 1163         }
 1164 
 1165 out:
 1166         if (cdai.buf != NULL)
 1167                 free(cdai.buf, M_CAMXPT);
 1168         return ret;
 1169 }
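
/*
 * Caller sketch (hedged): consumers such as GEOM fetch attributes this
 * way, honoring the path-lock assertion above:
 *
 *	char ident[100];
 *
 *	cam_periph_lock(periph);
 *	if (xpt_getattr(ident, sizeof(ident), "GEOM::ident",
 *	    periph->path) == 0)
 *		printf("ident: %s\n", ident);
 *	cam_periph_unlock(periph);
 */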
 1170 
 1171 static dev_match_ret
 1172 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1173             struct cam_eb *bus)
 1174 {
 1175         dev_match_ret retval;
 1176         int i;
 1177 
 1178         retval = DM_RET_NONE;
 1179 
 1180         /*
 1181          * If we aren't given something to match against, that's an error.
 1182          */
 1183         if (bus == NULL)
 1184                 return(DM_RET_ERROR);
 1185 
 1186         /*
 1187          * If there are no match entries, then this bus matches no
 1188          * matter what.
 1189          */
 1190         if ((patterns == NULL) || (num_patterns == 0))
 1191                 return(DM_RET_DESCEND | DM_RET_COPY);
 1192 
 1193         for (i = 0; i < num_patterns; i++) {
 1194                 struct bus_match_pattern *cur_pattern;
 1195 
 1196                 /*
 1197                  * If the pattern in question isn't for a bus node, we
 1198                  * aren't interested.  However, we do indicate to the
 1199                  * calling routine that we should continue descending the
 1200                  * tree, since the user wants to match against lower-level
 1201                  * EDT elements.
 1202                  */
 1203                 if (patterns[i].type != DEV_MATCH_BUS) {
 1204                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1205                                 retval |= DM_RET_DESCEND;
 1206                         continue;
 1207                 }
 1208 
 1209                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1210 
 1211                 /*
 1212                  * If they want to match any bus node, we give them any
 1213                  * device node.
 1214                  */
 1215                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1216                         /* set the copy flag */
 1217                         retval |= DM_RET_COPY;
 1218 
 1219                         /*
 1220                          * If we've already decided on an action, go ahead
 1221                          * and return.
 1222                          */
 1223                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1224                                 return(retval);
 1225                 }
 1226 
 1227                 /*
 1228                  * Not sure why someone would do this...
 1229                  */
 1230                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1231                         continue;
 1232 
 1233                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1234                  && (cur_pattern->path_id != bus->path_id))
 1235                         continue;
 1236 
 1237                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1238                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1239                         continue;
 1240 
 1241                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1242                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1243                         continue;
 1244 
 1245                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1246                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1247                              DEV_IDLEN) != 0))
 1248                         continue;
 1249 
 1250                 /*
 1251                  * If we get to this point, the user definitely wants
 1252                  * information on this bus.  So tell the caller to copy the
 1253                  * data out.
 1254                  */
 1255                 retval |= DM_RET_COPY;
 1256 
 1257                 /*
 1258                  * If the return action has been set to descend, then we
 1259                  * know that we've already seen a non-bus matching
 1260                  * expression, therefore we need to further descend the tree.
 1261                  * This won't change by continuing around the loop, so we
 1262                  * go ahead and return.  If we haven't seen a non-bus
 1263                  * matching expression, we keep going around the loop until
 1264                  * we exhaust the matching expressions.  We'll set the stop
 1265                  * flag once we fall out of the loop.
 1266                  */
 1267                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1268                         return(retval);
 1269         }
 1270 
 1271         /*
 1272          * If the return action hasn't been set to descend yet, that means
 1273          * we haven't seen anything other than bus matching patterns.  So
 1274          * tell the caller to stop descending the tree -- the user doesn't
 1275          * want to match against lower level tree elements.
 1276          */
 1277         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1278                 retval |= DM_RET_STOP;
 1279 
 1280         return(retval);
 1281 }
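
      /*
       * A quick reference for the dev_match_ret convention used by the
       * matching functions in this file (see the definitions in
       * cam_ccb.h): the bits under DM_RET_ACTION_MASK hold exactly one
       * action (DM_RET_NONE, DM_RET_STOP, DM_RET_DESCEND or
       * DM_RET_ERROR), while DM_RET_COPY is an independent flag that may
       * be OR'd with any of them.  For example, a result meaning "copy
       * this node out and keep descending" is built as:
       *
       *	retval = DM_RET_DESCEND | DM_RET_COPY;
       */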
 1282 
 1283 static dev_match_ret
 1284 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1285                struct cam_ed *device)
 1286 {
 1287         dev_match_ret retval;
 1288         int i;
 1289 
 1290         retval = DM_RET_NONE;
 1291 
 1292         /*
 1293          * If we aren't given something to match against, that's an error.
 1294          */
 1295         if (device == NULL)
 1296                 return(DM_RET_ERROR);
 1297 
 1298         /*
 1299          * If there are no match entries, then this device matches no
 1300          * matter what.
 1301          */
 1302         if ((patterns == NULL) || (num_patterns == 0))
 1303                 return(DM_RET_DESCEND | DM_RET_COPY);
 1304 
 1305         for (i = 0; i < num_patterns; i++) {
 1306                 struct device_match_pattern *cur_pattern;
 1307                 struct scsi_vpd_device_id *device_id_page;
 1308 
 1309                 /*
 1310                  * If the pattern in question isn't for a device node, we
 1311                  * aren't interested.
 1312                  */
 1313                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1314                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1315                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1316                                 retval |= DM_RET_DESCEND;
 1317                         continue;
 1318                 }
 1319 
 1320                 cur_pattern = &patterns[i].pattern.device_pattern;
 1321 
 1322                 /* Error out if mutually exclusive options are specified. */ 
 1323                 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
 1324                  == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
 1325                         return(DM_RET_ERROR);
 1326 
 1327                 /*
 1328                  * If they want to match any device node, we give them any
 1329                  * device node.
 1330                  */
 1331                 if (cur_pattern->flags == DEV_MATCH_ANY)
 1332                         goto copy_dev_node;
 1333 
 1334                 /*
 1335                  * Not sure why someone would do this...
 1336                  */
 1337                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1338                         continue;
 1339 
 1340                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1341                  && (cur_pattern->path_id != device->target->bus->path_id))
 1342                         continue;
 1343 
 1344                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1345                  && (cur_pattern->target_id != device->target->target_id))
 1346                         continue;
 1347 
 1348                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1349                  && (cur_pattern->target_lun != device->lun_id))
 1350                         continue;
 1351 
 1352                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1353                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1354                                     (caddr_t)&cur_pattern->data.inq_pat,
 1355                                     1, sizeof(cur_pattern->data.inq_pat),
 1356                                     scsi_static_inquiry_match) == NULL))
 1357                         continue;
 1358 
 1359                 device_id_page = (struct scsi_vpd_device_id *)device->device_id;
 1360                 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
 1361                  && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
 1362                   || scsi_devid_match((uint8_t *)device_id_page->desc_list,
 1363                                       device->device_id_len
 1364                                     - SVPD_DEVICE_ID_HDR_LEN,
 1365                                       cur_pattern->data.devid_pat.id,
 1366                                       cur_pattern->data.devid_pat.id_len) != 0))
 1367                         continue;
 1368 
 1369 copy_dev_node:
 1370                 /*
 1371                  * If we get to this point, the user definitely wants
 1372                  * information on this device.  So tell the caller to copy
 1373                  * the data out.
 1374                  */
 1375                 retval |= DM_RET_COPY;
 1376 
 1377                 /*
 1378                  * If the return action has been set to descend, then we
 1379                  * know that we've already seen a peripheral matching
 1380                  * expression, therefore we need to further descend the tree.
 1381                  * This won't change by continuing around the loop, so we
 1382                  * go ahead and return.  If we haven't seen a peripheral
 1383                  * matching expression, we keep going around the loop until
 1384                  * we exhaust the matching expressions.  We'll set the stop
 1385                  * flag once we fall out of the loop.
 1386                  */
 1387                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1388                         return(retval);
 1389         }
 1390 
 1391         /*
 1392          * If the return action hasn't been set to descend yet, that means
 1393          * we haven't seen any peripheral matching patterns.  So tell the
 1394          * caller to stop descending the tree -- the user doesn't want to
 1395          * match against lower level tree elements.
 1396          */
 1397         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1398                 retval |= DM_RET_STOP;
 1399 
 1400         return(retval);
 1401 }
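
      /*
       * A hedged sketch of a device pattern for the matcher above, here
       * pinning a single path/target/lun tuple.  The type, flags, and
       * field names are from struct device_match_pattern in cam_ccb.h;
       * the values and surrounding setup are hypothetical:
       *
       *	struct dev_match_pattern p;
       *
       *	memset(&p, 0, sizeof(p));
       *	p.type = DEV_MATCH_DEVICE;
       *	p.pattern.device_pattern.flags =
       *	    DEV_MATCH_PATH | DEV_MATCH_TARGET | DEV_MATCH_LUN;
       *	p.pattern.device_pattern.path_id = 0;
       *	p.pattern.device_pattern.target_id = 1;
       *	p.pattern.device_pattern.target_lun = 0;
       */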
 1402 
 1403 /*
 1404  * Match a single peripheral against any number of match patterns.
 1405  */
 1406 static dev_match_ret
 1407 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1408                struct cam_periph *periph)
 1409 {
 1410         dev_match_ret retval;
 1411         int i;
 1412 
 1413         /*
 1414          * If we aren't given something to match against, that's an error.
 1415          */
 1416         if (periph == NULL)
 1417                 return(DM_RET_ERROR);
 1418 
 1419         /*
 1420          * If there are no match entries, then this peripheral matches no
 1421          * matter what.
 1422          */
 1423         if ((patterns == NULL) || (num_patterns == 0))
 1424                 return(DM_RET_STOP | DM_RET_COPY);
 1425 
 1426         /*
 1427          * There aren't any nodes below a peripheral node, so there's no
 1428          * reason to descend the tree any further.
 1429          */
 1430         retval = DM_RET_STOP;
 1431 
 1432         for (i = 0; i < num_patterns; i++) {
 1433                 struct periph_match_pattern *cur_pattern;
 1434 
 1435                 /*
 1436                  * If the pattern in question isn't for a peripheral, we
 1437                  * aren't interested.
 1438                  */
 1439                 if (patterns[i].type != DEV_MATCH_PERIPH)
 1440                         continue;
 1441 
 1442                 cur_pattern = &patterns[i].pattern.periph_pattern;
 1443 
 1444                 /*
 1445                  * If they want to match on anything, then we will do so.
 1446                  */
 1447                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 1448                         /* set the copy flag */
 1449                         retval |= DM_RET_COPY;
 1450 
 1451                         /*
 1452                          * We've already set the return action to stop,
 1453                          * since there are no nodes below peripherals in
 1454                          * the tree.
 1455                          */
 1456                         return(retval);
 1457                 }
 1458 
 1459                 /*
 1460                  * Not sure why someone would do this...
 1461                  */
 1462                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 1463                         continue;
 1464 
 1465                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 1466                  && (cur_pattern->path_id != periph->path->bus->path_id))
 1467                         continue;
 1468 
 1469                 /*
 1470                  * For the target and lun IDs, we have to make sure the
 1471                  * target and lun pointers aren't NULL.  The xpt peripheral
 1472                  * has a wildcard target and device.
 1473                  */
 1474                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 1475                  && ((periph->path->target == NULL)
 1476                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 1477                         continue;
 1478 
 1479                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 1480                  && ((periph->path->device == NULL)
 1481                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 1482                         continue;
 1483 
 1484                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 1485                  && (cur_pattern->unit_number != periph->unit_number))
 1486                         continue;
 1487 
 1488                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 1489                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 1490                              DEV_IDLEN) != 0))
 1491                         continue;
 1492 
 1493                 /*
 1494                  * If we get to this point, the user definitely wants
 1495                  * information on this peripheral.  So tell the caller to
 1496                  * copy the data out.
 1497                  */
 1498                 retval |= DM_RET_COPY;
 1499 
 1500                 /*
 1501                  * The return action has already been set to stop, since
 1502                  * peripherals don't have any nodes below them in the EDT.
 1503                  */
 1504                 return(retval);
 1505         }
 1506 
 1507         /*
 1508          * If we get to this point, the peripheral that was passed in
 1509          * doesn't match any of the patterns.
 1510          */
 1511         return(retval);
 1512 }
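
      /*
       * A minimal sketch of the kind of pattern a consumer might hand
       * to the matcher above, e.g. to match every "da" peripheral.  The
       * type and flags come from cam_ccb.h; the surrounding setup is
       * hypothetical:
       *
       *	struct dev_match_pattern p;
       *
       *	memset(&p, 0, sizeof(p));
       *	p.type = DEV_MATCH_PERIPH;
       *	p.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
       *	strlcpy(p.pattern.periph_pattern.periph_name, "da",
       *	    DEV_IDLEN);
       */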
 1513 
 1514 static int
 1515 xptedtbusfunc(struct cam_eb *bus, void *arg)
 1516 {
 1517         struct ccb_dev_match *cdm;
 1518         struct cam_et *target;
 1519         dev_match_ret retval;
 1520 
 1521         cdm = (struct ccb_dev_match *)arg;
 1522 
 1523         /*
 1524          * If our position is for something deeper in the tree, that means
 1525          * that we've already seen this node.  So, we keep going down.
 1526          */
 1527         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1528          && (cdm->pos.cookie.bus == bus)
 1529          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1530          && (cdm->pos.cookie.target != NULL))
 1531                 retval = DM_RET_DESCEND;
 1532         else
 1533                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 1534 
 1535         /*
 1536          * If we got an error, bail out of the search.
 1537          */
 1538         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1539                 cdm->status = CAM_DEV_MATCH_ERROR;
 1540                 return(0);
 1541         }
 1542 
 1543         /*
 1544          * If the copy flag is set, copy this bus out.
 1545          */
 1546         if (retval & DM_RET_COPY) {
 1547                 int spaceleft, j;
 1548 
 1549                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1550                         sizeof(struct dev_match_result));
 1551 
 1552                 /*
 1553                  * If we don't have enough space to put in another
 1554                  * match result, save our position and tell the
 1555                  * user there are more devices to check.
 1556                  */
 1557                 if (spaceleft < sizeof(struct dev_match_result)) {
 1558                         bzero(&cdm->pos, sizeof(cdm->pos));
 1559                         cdm->pos.position_type =
 1560                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 1561 
 1562                         cdm->pos.cookie.bus = bus;
 1563                         cdm->pos.generations[CAM_BUS_GENERATION] =
 1564                                 xsoftc.bus_generation;
 1565                         cdm->status = CAM_DEV_MATCH_MORE;
 1566                         return(0);
 1567                 }
 1568                 j = cdm->num_matches;
 1569                 cdm->num_matches++;
 1570                 cdm->matches[j].type = DEV_MATCH_BUS;
 1571                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 1572                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 1573                 cdm->matches[j].result.bus_result.unit_number =
 1574                         bus->sim->unit_number;
 1575                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 1576                         bus->sim->sim_name, DEV_IDLEN);
 1577         }
 1578 
 1579         /*
 1580          * If the user is only interested in busses, there's no
 1581          * reason to descend to the next level in the tree.
 1582          */
 1583         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1584                 return(1);
 1585 
 1586         /*
 1587          * If there is a target generation recorded, check it to
 1588          * make sure the target list hasn't changed.
 1589          */
 1590         mtx_lock(&bus->eb_mtx);
 1591         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1592          && (cdm->pos.cookie.bus == bus)
 1593          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1594          && (cdm->pos.cookie.target != NULL)) {
 1595                 if (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 1596                     bus->generation) {
 1597                         mtx_unlock(&bus->eb_mtx);
 1598                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1599                         return (0);
 1600                 }
 1601                 target = (struct cam_et *)cdm->pos.cookie.target;
 1602                 target->refcount++;
 1603         } else
 1604                 target = NULL;
 1605         mtx_unlock(&bus->eb_mtx);
 1606 
 1607         return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
 1608 }
 1609 
 1610 static int
 1611 xptedttargetfunc(struct cam_et *target, void *arg)
 1612 {
 1613         struct ccb_dev_match *cdm;
 1614         struct cam_eb *bus;
 1615         struct cam_ed *device;
 1616 
 1617         cdm = (struct ccb_dev_match *)arg;
 1618         bus = target->bus;
 1619 
 1620         /*
 1621          * If there is a device list generation recorded, check it to
 1622          * make sure the device list hasn't changed.
 1623          */
 1624         mtx_lock(&bus->eb_mtx);
 1625         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1626          && (cdm->pos.cookie.bus == bus)
 1627          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1628          && (cdm->pos.cookie.target == target)
 1629          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1630          && (cdm->pos.cookie.device != NULL)) {
 1631                 if (cdm->pos.generations[CAM_DEV_GENERATION] !=
 1632                     target->generation) {
 1633                         mtx_unlock(&bus->eb_mtx);
 1634                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1635                         return(0);
 1636                 }
 1637                 device = (struct cam_ed *)cdm->pos.cookie.device;
 1638                 device->refcount++;
 1639         } else
 1640                 device = NULL;
 1641         mtx_unlock(&bus->eb_mtx);
 1642 
 1643         return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
 1644 }
 1645 
 1646 static int
 1647 xptedtdevicefunc(struct cam_ed *device, void *arg)
 1648 {
 1649         struct cam_eb *bus;
 1650         struct cam_periph *periph;
 1651         struct ccb_dev_match *cdm;
 1652         dev_match_ret retval;
 1653 
 1654         cdm = (struct ccb_dev_match *)arg;
 1655         bus = device->target->bus;
 1656 
 1657         /*
 1658          * If our position is for something deeper in the tree, that means
 1659          * that we've already seen this node.  So, we keep going down.
 1660          */
 1661         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1662          && (cdm->pos.cookie.device == device)
 1663          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1664          && (cdm->pos.cookie.periph != NULL))
 1665                 retval = DM_RET_DESCEND;
 1666         else
 1667                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 1668                                         device);
 1669 
 1670         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1671                 cdm->status = CAM_DEV_MATCH_ERROR;
 1672                 return(0);
 1673         }
 1674 
 1675         /*
 1676          * If the copy flag is set, copy this device out.
 1677          */
 1678         if (retval & DM_RET_COPY) {
 1679                 int spaceleft, j;
 1680 
 1681                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1682                         sizeof(struct dev_match_result));
 1683 
 1684                 /*
 1685                  * If we don't have enough space to put in another
 1686                  * match result, save our position and tell the
 1687                  * user there are more devices to check.
 1688                  */
 1689                 if (spaceleft < sizeof(struct dev_match_result)) {
 1690                         bzero(&cdm->pos, sizeof(cdm->pos));
 1691                         cdm->pos.position_type =
 1692                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1693                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 1694 
 1695                         cdm->pos.cookie.bus = device->target->bus;
 1696                         cdm->pos.generations[CAM_BUS_GENERATION] =
 1697                                 xsoftc.bus_generation;
 1698                         cdm->pos.cookie.target = device->target;
 1699                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1700                                 device->target->bus->generation;
 1701                         cdm->pos.cookie.device = device;
 1702                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1703                                 device->target->generation;
 1704                         cdm->status = CAM_DEV_MATCH_MORE;
 1705                         return(0);
 1706                 }
 1707                 j = cdm->num_matches;
 1708                 cdm->num_matches++;
 1709                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 1710                 cdm->matches[j].result.device_result.path_id =
 1711                         device->target->bus->path_id;
 1712                 cdm->matches[j].result.device_result.target_id =
 1713                         device->target->target_id;
 1714                 cdm->matches[j].result.device_result.target_lun =
 1715                         device->lun_id;
 1716                 cdm->matches[j].result.device_result.protocol =
 1717                         device->protocol;
 1718                 bcopy(&device->inq_data,
 1719                       &cdm->matches[j].result.device_result.inq_data,
 1720                       sizeof(struct scsi_inquiry_data));
 1721                 bcopy(&device->ident_data,
 1722                       &cdm->matches[j].result.device_result.ident_data,
 1723                       sizeof(struct ata_params));
 1724 
 1725                 /* Let the user know whether this device is unconfigured */
 1726                 if (device->flags & CAM_DEV_UNCONFIGURED)
 1727                         cdm->matches[j].result.device_result.flags =
 1728                                 DEV_RESULT_UNCONFIGURED;
 1729                 else
 1730                         cdm->matches[j].result.device_result.flags =
 1731                                 DEV_RESULT_NOFLAG;
 1732         }
 1733 
 1734         /*
 1735          * If the user isn't interested in peripherals, don't descend
 1736          * the tree any further.
 1737          */
 1738         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1739                 return(1);
 1740 
 1741         /*
 1742          * If there is a peripheral list generation recorded, make sure
 1743          * it hasn't changed.
 1744          */
 1745         xpt_lock_buses();
 1746         mtx_lock(&bus->eb_mtx);
 1747         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1748          && (cdm->pos.cookie.bus == bus)
 1749          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1750          && (cdm->pos.cookie.target == device->target)
 1751          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1752          && (cdm->pos.cookie.device == device)
 1753          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1754          && (cdm->pos.cookie.periph != NULL)) {
 1755                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1756                     device->generation) {
 1757                         mtx_unlock(&bus->eb_mtx);
 1758                         xpt_unlock_buses();
 1759                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1760                         return(0);
 1761                 }
 1762                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
 1763                 periph->refcount++;
 1764         } else
 1765                 periph = NULL;
 1766         mtx_unlock(&bus->eb_mtx);
 1767         xpt_unlock_buses();
 1768 
 1769         return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
 1770 }
 1771 
 1772 static int
 1773 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 1774 {
 1775         struct ccb_dev_match *cdm;
 1776         dev_match_ret retval;
 1777 
 1778         cdm = (struct ccb_dev_match *)arg;
 1779 
 1780         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1781 
 1782         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1783                 cdm->status = CAM_DEV_MATCH_ERROR;
 1784                 return(0);
 1785         }
 1786 
 1787         /*
 1788          * If the copy flag is set, copy this peripheral out.
 1789          */
 1790         if (retval & DM_RET_COPY) {
 1791                 int spaceleft, j;
 1792 
 1793                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1794                         sizeof(struct dev_match_result));
 1795 
 1796                 /*
 1797                  * If we don't have enough space to put in another
 1798                  * match result, save our position and tell the
 1799                  * user there are more devices to check.
 1800                  */
 1801                 if (spaceleft < sizeof(struct dev_match_result)) {
 1802                         bzero(&cdm->pos, sizeof(cdm->pos));
 1803                         cdm->pos.position_type =
 1804                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1805                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 1806                                 CAM_DEV_POS_PERIPH;
 1807 
 1808                         cdm->pos.cookie.bus = periph->path->bus;
 1809                         cdm->pos.generations[CAM_BUS_GENERATION] =
 1810                                 xsoftc.bus_generation;
 1811                         cdm->pos.cookie.target = periph->path->target;
 1812                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1813                                 periph->path->bus->generation;
 1814                         cdm->pos.cookie.device = periph->path->device;
 1815                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1816                                 periph->path->target->generation;
 1817                         cdm->pos.cookie.periph = periph;
 1818                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1819                                 periph->path->device->generation;
 1820                         cdm->status = CAM_DEV_MATCH_MORE;
 1821                         return(0);
 1822                 }
 1823 
 1824                 j = cdm->num_matches;
 1825                 cdm->num_matches++;
 1826                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1827                 cdm->matches[j].result.periph_result.path_id =
 1828                         periph->path->bus->path_id;
 1829                 cdm->matches[j].result.periph_result.target_id =
 1830                         periph->path->target->target_id;
 1831                 cdm->matches[j].result.periph_result.target_lun =
 1832                         periph->path->device->lun_id;
 1833                 cdm->matches[j].result.periph_result.unit_number =
 1834                         periph->unit_number;
 1835                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1836                         periph->periph_name, DEV_IDLEN);
 1837         }
 1838 
 1839         return(1);
 1840 }
 1841 
 1842 static int
 1843 xptedtmatch(struct ccb_dev_match *cdm)
 1844 {
 1845         struct cam_eb *bus;
 1846         int ret;
 1847 
 1848         cdm->num_matches = 0;
 1849 
 1850         /*
 1851          * Check the bus list generation.  If it has changed, the user
 1852          * needs to reset everything and start over.
 1853          */
 1854         xpt_lock_buses();
 1855         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1856          && (cdm->pos.cookie.bus != NULL)) {
 1857                 if (cdm->pos.generations[CAM_BUS_GENERATION] !=
 1858                     xsoftc.bus_generation) {
 1859                         xpt_unlock_buses();
 1860                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1861                         return(0);
 1862                 }
 1863                 bus = (struct cam_eb *)cdm->pos.cookie.bus;
 1864                 bus->refcount++;
 1865         } else
 1866                 bus = NULL;
 1867         xpt_unlock_buses();
 1868 
 1869         ret = xptbustraverse(bus, xptedtbusfunc, cdm);
 1870 
 1871         /*
 1872          * If we get back 0, that means that we had to stop before fully
 1873          * traversing the EDT.  It also means that one of the subroutines
 1874          * has set the status field to the proper value.  If we get back 1,
 1875          * we've fully traversed the EDT and copied out any matching entries.
 1876          */
 1877         if (ret == 1)
 1878                 cdm->status = CAM_DEV_MATCH_LAST;
 1879 
 1880         return(ret);
 1881 }
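
      /*
       * Callers are expected to drive xptedtmatch() (and, below,
       * xptperiphlistmatch()) iteratively: issue an XPT_DEV_MATCH CCB,
       * consume cdm->num_matches results, and resubmit with cdm->pos
       * left intact for as long as the status is CAM_DEV_MATCH_MORE, so
       * the traversal resumes where it stopped.  A hedged sketch of an
       * in-kernel consumer:
       *
       *	do {
       *		xpt_action((union ccb *)cdm);
       *		if (cdm->status != CAM_DEV_MATCH_LAST &&
       *		    cdm->status != CAM_DEV_MATCH_MORE)
       *			break;
       *		(process cdm->matches[0 .. cdm->num_matches - 1])
       *	} while (cdm->status == CAM_DEV_MATCH_MORE);
       */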
 1882 
 1883 static int
 1884 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 1885 {
 1886         struct cam_periph *periph;
 1887         struct ccb_dev_match *cdm;
 1888 
 1889         cdm = (struct ccb_dev_match *)arg;
 1890 
 1891         xpt_lock_buses();
 1892         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1893          && (cdm->pos.cookie.pdrv == pdrv)
 1894          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1895          && (cdm->pos.cookie.periph != NULL)) {
 1896                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1897                     (*pdrv)->generation) {
 1898                         xpt_unlock_buses();
 1899                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1900                         return(0);
 1901                 }
 1902                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
 1903                 periph->refcount++;
 1904         } else
 1905                 periph = NULL;
 1906         xpt_unlock_buses();
 1907 
 1908         return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
 1909 }
 1910 
 1911 static int
 1912 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 1913 {
 1914         struct ccb_dev_match *cdm;
 1915         dev_match_ret retval;
 1916 
 1917         cdm = (struct ccb_dev_match *)arg;
 1918 
 1919         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1920 
 1921         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1922                 cdm->status = CAM_DEV_MATCH_ERROR;
 1923                 return(0);
 1924         }
 1925 
 1926         /*
 1927          * If the copy flag is set, copy this peripheral out.
 1928          */
 1929         if (retval & DM_RET_COPY) {
 1930                 int spaceleft, j;
 1931 
 1932                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1933                         sizeof(struct dev_match_result));
 1934 
 1935                 /*
 1936                  * If we don't have enough space to put in another
 1937                  * match result, save our position and tell the
 1938                  * user there are more devices to check.
 1939                  */
 1940                 if (spaceleft < sizeof(struct dev_match_result)) {
 1941                         struct periph_driver **pdrv;
 1942 
 1943                         pdrv = NULL;
 1944                         bzero(&cdm->pos, sizeof(cdm->pos));
 1945                         cdm->pos.position_type =
 1946                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 1947                                 CAM_DEV_POS_PERIPH;
 1948 
 1949                         /*
 1950                          * This may look a bit nonsensical, but it is
 1951                          * actually quite logical.  There are very few
 1952                          * peripheral drivers, and bloating every peripheral
 1953                          * structure with a pointer back to its parent
 1954                          * peripheral driver linker set entry would cost
 1955                          * more in the long run than doing this quick lookup.
 1956                          */
 1957                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 1958                                 if (strcmp((*pdrv)->driver_name,
 1959                                     periph->periph_name) == 0)
 1960                                         break;
 1961                         }
 1962 
 1963                         if (*pdrv == NULL) {
 1964                                 cdm->status = CAM_DEV_MATCH_ERROR;
 1965                                 return(0);
 1966                         }
 1967 
 1968                         cdm->pos.cookie.pdrv = pdrv;
 1969                         /*
 1970                          * The periph generation slot does double duty, as
 1971                          * does the periph pointer slot.  They are used for
 1972                          * both edt and pdrv lookups and positioning.
 1973                          */
 1974                         cdm->pos.cookie.periph = periph;
 1975                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1976                                 (*pdrv)->generation;
 1977                         cdm->status = CAM_DEV_MATCH_MORE;
 1978                         return(0);
 1979                 }
 1980 
 1981                 j = cdm->num_matches;
 1982                 cdm->num_matches++;
 1983                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1984                 cdm->matches[j].result.periph_result.path_id =
 1985                         periph->path->bus->path_id;
 1986 
 1987                 /*
 1988                  * The transport layer peripheral doesn't have a target or
 1989                  * lun.
 1990                  */
 1991                 if (periph->path->target)
 1992                         cdm->matches[j].result.periph_result.target_id =
 1993                                 periph->path->target->target_id;
 1994                 else
 1995                         cdm->matches[j].result.periph_result.target_id =
 1996                                 CAM_TARGET_WILDCARD;
 1997 
 1998                 if (periph->path->device)
 1999                         cdm->matches[j].result.periph_result.target_lun =
 2000                                 periph->path->device->lun_id;
 2001                 else
 2002                         cdm->matches[j].result.periph_result.target_lun =
 2003                                 CAM_LUN_WILDCARD;
 2004 
 2005                 cdm->matches[j].result.periph_result.unit_number =
 2006                         periph->unit_number;
 2007                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2008                         periph->periph_name, DEV_IDLEN);
 2009         }
 2010 
 2011         return(1);
 2012 }
 2013 
 2014 static int
 2015 xptperiphlistmatch(struct ccb_dev_match *cdm)
 2016 {
 2017         int ret;
 2018 
 2019         cdm->num_matches = 0;
 2020 
 2021         /*
 2022          * At this point in the edt traversal function, we check the bus
 2023          * list generation to make sure that no busses have been added or
 2024          * removed since the user last sent an XPT_DEV_MATCH ccb through.
 2025          * For the peripheral driver list traversal function, however, we
 2026          * don't have to worry about new peripheral driver types coming or
 2027          * going; they're in a linker set, and therefore can't change
 2028          * without a recompile.
 2029          */
 2030 
 2031         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2032          && (cdm->pos.cookie.pdrv != NULL))
 2033                 ret = xptpdrvtraverse(
 2034                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2035                                 xptplistpdrvfunc, cdm);
 2036         else
 2037                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2038 
 2039         /*
 2040          * If we get back 0, that means that we had to stop before fully
 2041          * traversing the peripheral driver tree.  It also means that one of
 2042          * the subroutines has set the status field to the proper value.  If
 2043          * we get back 1, we've fully traversed the peripheral driver list
 2044          * and copied out any matching entries.
 2045          */
 2046         if (ret == 1)
 2047                 cdm->status = CAM_DEV_MATCH_LAST;
 2048 
 2049         return(ret);
 2050 }
 2051 
 2052 static int
 2053 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2054 {
 2055         struct cam_eb *bus, *next_bus;
 2056         int retval;
 2057 
 2058         retval = 1;
 2059         if (start_bus)
 2060                 bus = start_bus;
 2061         else {
 2062                 xpt_lock_buses();
 2063                 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 2064                 if (bus == NULL) {
 2065                         xpt_unlock_buses();
 2066                         return (retval);
 2067                 }
 2068                 bus->refcount++;
 2069                 xpt_unlock_buses();
 2070         }
 2071         for (; bus != NULL; bus = next_bus) {
 2072                 retval = tr_func(bus, arg);
 2073                 if (retval == 0) {
 2074                         xpt_release_bus(bus);
 2075                         break;
 2076                 }
 2077                 xpt_lock_buses();
 2078                 next_bus = TAILQ_NEXT(bus, links);
 2079                 if (next_bus)
 2080                         next_bus->refcount++;
 2081                 xpt_unlock_buses();
 2082                 xpt_release_bus(bus);
 2083         }
 2084         return(retval);
 2085 }
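
      /*
       * Note the reference-counting idiom shared by this walker and the
       * target, device, and peripheral walkers below: the node handed to
       * tr_func() always holds a reference, and the next node is pinned
       * with its own reference while the list lock is held, before the
       * current node's reference is dropped.  This keeps nodes from
       * being freed out from under the traversal even though no list
       * lock is held across the tr_func() callback.
       */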
 2086 
 2087 static int
 2088 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2089                   xpt_targetfunc_t *tr_func, void *arg)
 2090 {
 2091         struct cam_et *target, *next_target;
 2092         int retval;
 2093 
 2094         retval = 1;
 2095         if (start_target)
 2096                 target = start_target;
 2097         else {
 2098                 mtx_lock(&bus->eb_mtx);
 2099                 target = TAILQ_FIRST(&bus->et_entries);
 2100                 if (target == NULL) {
 2101                         mtx_unlock(&bus->eb_mtx);
 2102                         return (retval);
 2103                 }
 2104                 target->refcount++;
 2105                 mtx_unlock(&bus->eb_mtx);
 2106         }
 2107         for (; target != NULL; target = next_target) {
 2108                 retval = tr_func(target, arg);
 2109                 if (retval == 0) {
 2110                         xpt_release_target(target);
 2111                         break;
 2112                 }
 2113                 mtx_lock(&bus->eb_mtx);
 2114                 next_target = TAILQ_NEXT(target, links);
 2115                 if (next_target)
 2116                         next_target->refcount++;
 2117                 mtx_unlock(&bus->eb_mtx);
 2118                 xpt_release_target(target);
 2119         }
 2120         return(retval);
 2121 }
 2122 
 2123 static int
 2124 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2125                   xpt_devicefunc_t *tr_func, void *arg)
 2126 {
 2127         struct cam_eb *bus;
 2128         struct cam_ed *device, *next_device;
 2129         int retval;
 2130 
 2131         retval = 1;
 2132         bus = target->bus;
 2133         if (start_device)
 2134                 device = start_device;
 2135         else {
 2136                 mtx_lock(&bus->eb_mtx);
 2137                 device = TAILQ_FIRST(&target->ed_entries);
 2138                 if (device == NULL) {
 2139                         mtx_unlock(&bus->eb_mtx);
 2140                         return (retval);
 2141                 }
 2142                 device->refcount++;
 2143                 mtx_unlock(&bus->eb_mtx);
 2144         }
 2145         for (; device != NULL; device = next_device) {
 2146                 mtx_lock(&device->device_mtx);
 2147                 retval = tr_func(device, arg);
 2148                 mtx_unlock(&device->device_mtx);
 2149                 if (retval == 0) {
 2150                         xpt_release_device(device);
 2151                         break;
 2152                 }
 2153                 mtx_lock(&bus->eb_mtx);
 2154                 next_device = TAILQ_NEXT(device, links);
 2155                 if (next_device)
 2156                         next_device->refcount++;
 2157                 mtx_unlock(&bus->eb_mtx);
 2158                 xpt_release_device(device);
 2159         }
 2160         return(retval);
 2161 }
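
      /*
       * Unlike the bus and target walkers above, this one calls
       * tr_func() with the device's own mutex held, so the callback
       * (e.g. xptedtdevicefunc()) runs with that device's state locked.
       */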
 2162 
 2163 static int
 2164 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2165                   xpt_periphfunc_t *tr_func, void *arg)
 2166 {
 2167         struct cam_eb *bus;
 2168         struct cam_periph *periph, *next_periph;
 2169         int retval;
 2170 
 2171         retval = 1;
 2172 
 2173         bus = device->target->bus;
 2174         if (start_periph)
 2175                 periph = start_periph;
 2176         else {
 2177                 xpt_lock_buses();
 2178                 mtx_lock(&bus->eb_mtx);
 2179                 periph = SLIST_FIRST(&device->periphs);
 2180                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
 2181                         periph = SLIST_NEXT(periph, periph_links);
 2182                 if (periph == NULL) {
 2183                         mtx_unlock(&bus->eb_mtx);
 2184                         xpt_unlock_buses();
 2185                         return (retval);
 2186                 }
 2187                 periph->refcount++;
 2188                 mtx_unlock(&bus->eb_mtx);
 2189                 xpt_unlock_buses();
 2190         }
 2191         for (; periph != NULL; periph = next_periph) {
 2192                 retval = tr_func(periph, arg);
 2193                 if (retval == 0) {
 2194                         cam_periph_release_locked(periph);
 2195                         break;
 2196                 }
 2197                 xpt_lock_buses();
 2198                 mtx_lock(&bus->eb_mtx);
 2199                 next_periph = SLIST_NEXT(periph, periph_links);
 2200                 while (next_periph != NULL &&
 2201                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
 2202                         next_periph = SLIST_NEXT(next_periph, periph_links);
 2203                 if (next_periph)
 2204                         next_periph->refcount++;
 2205                 mtx_unlock(&bus->eb_mtx);
 2206                 xpt_unlock_buses();
 2207                 cam_periph_release_locked(periph);
 2208         }
 2209         return(retval);
 2210 }
 2211 
 2212 static int
 2213 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2214                 xpt_pdrvfunc_t *tr_func, void *arg)
 2215 {
 2216         struct periph_driver **pdrv;
 2217         int retval;
 2218 
 2219         retval = 1;
 2220 
 2221         /*
 2222          * We don't traverse the peripheral driver list like we do the
 2223          * other lists, because it is a linker set, and therefore cannot be
 2224          * changed during runtime.  If the peripheral driver list is ever
 2225          * re-done to be something other than a linker set (i.e. it can
 2226          * change while the system is running), the list traversal should
 2227          * be modified to work like the other traversal functions.
 2228          */
 2229         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2230              *pdrv != NULL; pdrv++) {
 2231                 retval = tr_func(pdrv, arg);
 2232 
 2233                 if (retval == 0)
 2234                         return(retval);
 2235         }
 2236 
 2237         return(retval);
 2238 }
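
      /*
       * For example, locating the driver entry for a named peripheral
       * reduces to a linear scan of the same NULL-terminated array; this
       * mirrors what xptplistperiphfunc() above does when saving a
       * resume position ("name" here is hypothetical):
       *
       *	struct periph_driver **pdrv;
       *
       *	for (pdrv = periph_drivers; *pdrv != NULL; pdrv++)
       *		if (strcmp((*pdrv)->driver_name, name) == 0)
       *			break;
       */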
 2239 
 2240 static int
 2241 xptpdperiphtraverse(struct periph_driver **pdrv,
 2242                     struct cam_periph *start_periph,
 2243                     xpt_periphfunc_t *tr_func, void *arg)
 2244 {
 2245         struct cam_periph *periph, *next_periph;
 2246         int retval;
 2247 
 2248         retval = 1;
 2249 
 2250         if (start_periph)
 2251                 periph = start_periph;
 2252         else {
 2253                 xpt_lock_buses();
 2254                 periph = TAILQ_FIRST(&(*pdrv)->units);
 2255                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
 2256                         periph = TAILQ_NEXT(periph, unit_links);
 2257                 if (periph == NULL) {
 2258                         xpt_unlock_buses();
 2259                         return (retval);
 2260                 }
 2261                 periph->refcount++;
 2262                 xpt_unlock_buses();
 2263         }
 2264         for (; periph != NULL; periph = next_periph) {
 2265                 cam_periph_lock(periph);
 2266                 retval = tr_func(periph, arg);
 2267                 cam_periph_unlock(periph);
 2268                 if (retval == 0) {
 2269                         cam_periph_release(periph);
 2270                         break;
 2271                 }
 2272                 xpt_lock_buses();
 2273                 next_periph = TAILQ_NEXT(periph, unit_links);
 2274                 while (next_periph != NULL &&
 2275                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
 2276                         next_periph = TAILQ_NEXT(next_periph, unit_links);
 2277                 if (next_periph)
 2278                         next_periph->refcount++;
 2279                 xpt_unlock_buses();
 2280                 cam_periph_release(periph);
 2281         }
 2282         return(retval);
 2283 }
 2284 
 2285 static int
 2286 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2287 {
 2288         struct xpt_traverse_config *tr_config;
 2289 
 2290         tr_config = (struct xpt_traverse_config *)arg;
 2291 
 2292         if (tr_config->depth == XPT_DEPTH_BUS) {
 2293                 xpt_busfunc_t *tr_func;
 2294 
 2295                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2296 
 2297                 return(tr_func(bus, tr_config->tr_arg));
 2298         } else
 2299                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2300 }
 2301 
 2302 static int
 2303 xptdeftargetfunc(struct cam_et *target, void *arg)
 2304 {
 2305         struct xpt_traverse_config *tr_config;
 2306 
 2307         tr_config = (struct xpt_traverse_config *)arg;
 2308 
 2309         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2310                 xpt_targetfunc_t *tr_func;
 2311 
 2312                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2313 
 2314                 return(tr_func(target, tr_config->tr_arg));
 2315         } else
 2316                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2317 }
 2318 
 2319 static int
 2320 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2321 {
 2322         struct xpt_traverse_config *tr_config;
 2323 
 2324         tr_config = (struct xpt_traverse_config *)arg;
 2325 
 2326         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2327                 xpt_devicefunc_t *tr_func;
 2328 
 2329                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2330 
 2331                 return(tr_func(device, tr_config->tr_arg));
 2332         } else
 2333                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2334 }
 2335 
 2336 static int
 2337 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2338 {
 2339         struct xpt_traverse_config *tr_config;
 2340         xpt_periphfunc_t *tr_func;
 2341 
 2342         tr_config = (struct xpt_traverse_config *)arg;
 2343 
 2344         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2345 
 2346         /*
 2347          * Unlike the other default functions, we don't check for depth
 2348          * here.  The peripheral driver level is the last level in the EDT,
 2349          * so if we're here, we should execute the function in question.
 2350          */
 2351         return(tr_func(periph, tr_config->tr_arg));
 2352 }
 2353 
 2354 /*
 2355  * Execute the given function for every bus in the EDT.
 2356  */
 2357 static int
 2358 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2359 {
 2360         struct xpt_traverse_config tr_config;
 2361 
 2362         tr_config.depth = XPT_DEPTH_BUS;
 2363         tr_config.tr_func = tr_func;
 2364         tr_config.tr_arg = arg;
 2365 
 2366         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2367 }
 2368 
 2369 /*
 2370  * Execute the given function for every device in the EDT.
 2371  */
 2372 static int
 2373 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2374 {
 2375         struct xpt_traverse_config tr_config;
 2376 
 2377         tr_config.depth = XPT_DEPTH_DEVICE;
 2378         tr_config.tr_func = tr_func;
 2379         tr_config.tr_arg = arg;
 2380 
 2381         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2382 }
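
      /*
       * As a usage sketch: the XPT_SASYNC_CB handler elsewhere in this
       * file brings a newly registered async callback up to date with
       * every existing device via a call of the form
       *
       *	xpt_for_all_devices(xptsetasyncfunc, csa);
       *
       * where csa is the registering struct ccb_setasync.
       */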
 2383 
 2384 static int
 2385 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2386 {
 2387         struct cam_path path;
 2388         struct ccb_getdev cgd;
 2389         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2390 
 2391         /*
 2392          * Don't report unconfigured devices (Wildcard devs,
 2393          * devices only for target mode, device instances
 2394          * that have been invalidated but are waiting for
 2395          * their last reference count to be released).
 2396          */
 2397         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2398                 return (1);
 2399 
 2400         xpt_compile_path(&path,
 2401                          NULL,
 2402                          device->target->bus->path_id,
 2403                          device->target->target_id,
 2404                          device->lun_id);
 2405         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2406         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2407         xpt_action((union ccb *)&cgd);
 2408         csa->callback(csa->callback_arg,
 2409                             AC_FOUND_DEVICE,
 2410                             &path, &cgd);
 2411         xpt_release_path(&path);
 2412 
 2413         return(1);
 2414 }
 2415 
 2416 static int
 2417 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2418 {
 2419         struct cam_path path;
 2420         struct ccb_pathinq cpi;
 2421         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2422 
 2423         xpt_compile_path(&path, /*periph*/NULL,
 2424                          bus->path_id,
 2425                          CAM_TARGET_WILDCARD,
 2426                          CAM_LUN_WILDCARD);
 2427         xpt_path_lock(&path);
 2428         xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2429         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2430         xpt_action((union ccb *)&cpi);
 2431         csa->callback(csa->callback_arg,
 2432                             AC_PATH_REGISTERED,
 2433                             &path, &cpi);
 2434         xpt_path_unlock(&path);
 2435         xpt_release_path(&path);
 2436 
 2437         return(1);
 2438 }
 2439 
 2440 void
 2441 xpt_action(union ccb *start_ccb)
 2442 {
 2443 
 2444         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2445 
 2446         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2447         (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
 2448 }
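
      /*
       * xpt_action() is only a dispatcher: the per-bus transport's
       * action method does the real work, and transports fall back on
       * xpt_action_default() below for the function codes they do not
       * handle themselves.
       */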
 2449 
 2450 void
 2451 xpt_action_default(union ccb *start_ccb)
 2452 {
 2453         struct cam_path *path;
 2454         struct cam_sim *sim;
 2455         int lock;
 2456 
 2457         path = start_ccb->ccb_h.path;
 2458         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
 2459 
 2460         switch (start_ccb->ccb_h.func_code) {
 2461         case XPT_SCSI_IO:
 2462         {
 2463                 struct cam_ed *device;
 2464 
 2465                 /*
 2466                  * For the sake of compatibility with SCSI-1
 2467                  * devices that may not understand the identify
 2468                  * message, we include lun information in the
 2469                  * second byte of all commands.  SCSI-1 specifies
 2470                  * that luns are a 3-bit value and reserves only 3
 2471                  * bits for lun information in the CDB.  Later
 2472                  * revisions of the SCSI spec allow for more than 8
 2473                  * luns, but have deprecated lun information in the
 2474                  * CDB.  So, if the lun won't fit, we must omit it.
 2475                  *
 2476                  * Also be aware that during initial probing for devices,
 2477                  * the inquiry information is unknown but initialized to 0.
 2478                  * This means that this code will be exercised while probing
 2479                  * devices with an ANSI revision greater than 2.
 2480                  */
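                      /*
                       * Worked example: LUN 2 on a SCSI-2 or older
                       * device is encoded into bits 5-7 of CDB byte 1:
                       *
                       *	cdb_bytes[1] |= 2 << 5;	(that is, 0x40)
                       */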
 2481                 device = path->device;
 2482                 if (device->protocol_version <= SCSI_REV_2
 2483                  && start_ccb->ccb_h.target_lun < 8
 2484                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 2485 
 2486                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 2487                             start_ccb->ccb_h.target_lun << 5;
 2488                 }
 2489                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 2490         }
 2491         /* FALLTHROUGH */
 2492         case XPT_TARGET_IO:
 2493         case XPT_CONT_TARGET_IO:
 2494                 start_ccb->csio.sense_resid = 0;
 2495                 start_ccb->csio.resid = 0;
 2496                 /* FALLTHROUGH */
 2497         case XPT_ATA_IO:
 2498                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
 2499                         start_ccb->ataio.resid = 0;
 2500                 /* FALLTHROUGH */
 2501         case XPT_RESET_DEV:
 2502         case XPT_ENG_EXEC:
 2503         case XPT_SMP_IO:
 2504         {
 2505                 struct cam_devq *devq;
 2506 
 2507                 devq = path->bus->sim->devq;
 2508                 mtx_lock(&devq->send_mtx);
 2509                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 2510                 if (xpt_schedule_devq(devq, path->device) != 0)
 2511                         xpt_run_devq(devq);
 2512                 mtx_unlock(&devq->send_mtx);
 2513                 break;
 2514         }
 2515         case XPT_CALC_GEOMETRY:
 2516                 /* Filter out garbage */
 2517                 if (start_ccb->ccg.block_size == 0
 2518                  || start_ccb->ccg.volume_size == 0) {
 2519                         start_ccb->ccg.cylinders = 0;
 2520                         start_ccb->ccg.heads = 0;
 2521                         start_ccb->ccg.secs_per_track = 0;
 2522                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2523                         break;
 2524                 }
 2525 #if defined(PC98) || defined(__sparc64__)
 2526                 /*
 2527                  * In a PC-98 system, geometry translation depends on
 2528                  * the "real" device geometry obtained from mode page 4.
 2529                  * SCSI geometry translation is performed in the
 2530                  * initialization routine of the SCSI BIOS and the result
 2531                  * stored in host memory.  If the translation is available
 2532                  * in host memory, use it.  If not, rely on the default
 2533                  * translation the device driver performs.
 2534                  * For sparc64, we may need to adjust the geometry of large
 2535                  * disks in order to fit the limitations of the 16-bit
 2536                  * fields of the VTOC8 disk label.
 2537                  */
 2538                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 2539                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2540                         break;
 2541                 }
 2542 #endif
 2543                 goto call_sim;
 2544         case XPT_ABORT:
 2545         {
 2546                 union ccb* abort_ccb;
 2547 
 2548                 abort_ccb = start_ccb->cab.abort_ccb;
 2549                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 2550 
 2551                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 2552                                 struct cam_ccbq *ccbq;
 2553                                 struct cam_ed *device;
 2554 
 2555                                 device = abort_ccb->ccb_h.path->device;
 2556                                 ccbq = &device->ccbq;
 2557                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
 2558                                 abort_ccb->ccb_h.status =
 2559                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2560                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2561                                 xpt_done(abort_ccb);
 2562                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2563                                 break;
 2564                         }
 2565                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 2566                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 2567                                 /*
 2568                                  * We've caught this ccb en route to
 2569                                  * the SIM.  Flag it for abort and the
 2570                                  * SIM will do so just before starting
 2571                                  * real work on the CCB.
 2572                                  */
 2573                                 abort_ccb->ccb_h.status =
 2574                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2575                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2576                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2577                                 break;
 2578                         }
 2579                 }
 2580                 if (XPT_FC_IS_QUEUED(abort_ccb)
 2581                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 2582                         /*
 2583                          * It's already completed but waiting
 2584                          * for our SWI to get to it.
 2585                          */
 2586                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 2587                         break;
 2588                 }
 2589                 /*
 2590                  * If we weren't able to take care of the abort request
 2591                  * in the XPT, pass the request down to the SIM for processing.
 2592                  */
 2593         }
 2594         /* FALLTHROUGH */
 2595         case XPT_ACCEPT_TARGET_IO:
 2596         case XPT_EN_LUN:
 2597         case XPT_IMMED_NOTIFY:
 2598         case XPT_NOTIFY_ACK:
 2599         case XPT_RESET_BUS:
 2600         case XPT_IMMEDIATE_NOTIFY:
 2601         case XPT_NOTIFY_ACKNOWLEDGE:
 2602         case XPT_GET_SIM_KNOB:
 2603         case XPT_SET_SIM_KNOB:
 2604         case XPT_GET_TRAN_SETTINGS:
 2605         case XPT_SET_TRAN_SETTINGS:
 2606         case XPT_PATH_INQ:
 2607 call_sim:
 2608                 sim = path->bus->sim;
 2609                 lock = (mtx_owned(sim->mtx) == 0);
 2610                 if (lock)
 2611                         CAM_SIM_LOCK(sim);
 2612                 (*(sim->sim_action))(sim, start_ccb);
 2613                 if (lock)
 2614                         CAM_SIM_UNLOCK(sim);
 2615                 break;
 2616         case XPT_PATH_STATS:
 2617                 start_ccb->cpis.last_reset = path->bus->last_reset;
 2618                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2619                 break;
 2620         case XPT_GDEV_TYPE:
 2621         {
 2622                 struct cam_ed *dev;
 2623 
 2624                 dev = path->device;
 2625                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2626                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2627                 } else {
 2628                         struct ccb_getdev *cgd;
 2629 
 2630                         cgd = &start_ccb->cgd;
 2631                         cgd->protocol = dev->protocol;
 2632                         cgd->inq_data = dev->inq_data;
 2633                         cgd->ident_data = dev->ident_data;
 2634                         cgd->inq_flags = dev->inq_flags;
 2635                         cgd->ccb_h.status = CAM_REQ_CMP;
 2636                         cgd->serial_num_len = dev->serial_num_len;
 2637                         if ((dev->serial_num_len > 0)
 2638                          && (dev->serial_num != NULL))
 2639                                 bcopy(dev->serial_num, cgd->serial_num,
 2640                                       dev->serial_num_len);
 2641                 }
 2642                 break;
 2643         }
 2644         case XPT_GDEV_STATS:
 2645         {
 2646                 struct cam_ed *dev;
 2647 
 2648                 dev = path->device;
 2649                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2650                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2651                 } else {
 2652                         struct ccb_getdevstats *cgds;
 2653                         struct cam_eb *bus;
 2654                         struct cam_et *tar;
 2655 
 2656                         cgds = &start_ccb->cgds;
 2657                         bus = path->bus;
 2658                         tar = path->target;
 2659                         cgds->dev_openings = dev->ccbq.dev_openings;
 2660                         cgds->dev_active = dev->ccbq.dev_active;
 2661                         cgds->devq_openings = dev->ccbq.devq_openings;
 2662                         cgds->devq_queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
 2663                         cgds->held = dev->ccbq.held;
 2664                         cgds->last_reset = tar->last_reset;
 2665                         cgds->maxtags = dev->maxtags;
 2666                         cgds->mintags = dev->mintags;
 2667                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 2668                                 cgds->last_reset = bus->last_reset;
 2669                         cgds->ccb_h.status = CAM_REQ_CMP;
 2670                 }
 2671                 break;
 2672         }
 2673         case XPT_GDEVLIST:
 2674         {
 2675                 struct cam_periph       *nperiph;
 2676                 struct periph_list      *periph_head;
 2677                 struct ccb_getdevlist   *cgdl;
 2678                 u_int                   i;
 2679                 struct cam_ed           *device;
 2680                 int                     found;
 2681 
 2682 
 2683                 found = 0;
 2684 
 2685                 /*
 2686                  * Don't want anyone mucking with our data.
 2687                  */
 2688                 device = path->device;
 2689                 periph_head = &device->periphs;
 2690                 cgdl = &start_ccb->cgdl;
 2691 
 2692                 /*
 2693                  * Check and see if the list has changed since the user
 2694                  * last requested a list member.  If so, tell them that the
 2695                  * list has changed, and therefore they need to start over
 2696                  * from the beginning.
 2697                  */
 2698                 if ((cgdl->index != 0) &&
 2699                     (cgdl->generation != device->generation)) {
 2700                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 2701                         break;
 2702                 }
 2703 
 2704                 /*
 2705                  * Traverse the list of peripherals and attempt to find
 2706                  * the requested peripheral.
 2707                  */
 2708                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 2709                      (nperiph != NULL) && (i <= cgdl->index);
 2710                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 2711                         if (i == cgdl->index) {
 2712                                 strncpy(cgdl->periph_name,
 2713                                         nperiph->periph_name,
 2714                                         DEV_IDLEN);
 2715                                 cgdl->unit_number = nperiph->unit_number;
 2716                                 found = 1;
 2717                         }
 2718                 }
 2719                 if (found == 0) {
 2720                         cgdl->status = CAM_GDEVLIST_ERROR;
 2721                         break;
 2722                 }
 2723 
 2724                 if (nperiph == NULL)
 2725                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 2726                 else
 2727                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 2728 
 2729                 cgdl->index++;
 2730                 cgdl->generation = device->generation;
 2731 
 2732                 cgdl->ccb_h.status = CAM_REQ_CMP;
 2733                 break;
 2734         }
 2735         case XPT_DEV_MATCH:
 2736         {
 2737                 dev_pos_type position_type;
 2738                 struct ccb_dev_match *cdm;
 2739 
 2740                 cdm = &start_ccb->cdm;
 2741 
 2742                 /*
 2743                  * There are two ways of getting at information in the EDT.
 2744                  * The first way is via the primary EDT tree.  It starts
 2745                  * with a list of busses, then a list of targets on a bus,
 2746                  * then devices/luns on a target, and then peripherals on a
 2747                  * device/lun.  The "other" way is by the peripheral driver
 2748                  * lists.  The peripheral driver lists are organized by
 2749  * peripheral driver (obviously), so it makes sense to
 2750                  * use the peripheral driver list if the user is looking
 2751                  * for something like "da1", or all "da" devices.  If the
 2752                  * user is looking for something on a particular bus/target
 2753                  * or lun, it's generally better to go through the EDT tree.
 2754                  */
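                      /*
                       * Illustrative sketch (not part of the original code): a
                       * caller looking for all "da" peripherals might supply a
                       * single DEV_MATCH_PERIPH pattern, which steers the logic
                       * below to the peripheral driver lists:
                       *
                       *      struct dev_match_pattern p;
                       *
                       *      p.type = DEV_MATCH_PERIPH;
                       *      p.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
                       *      strlcpy(p.pattern.periph_pattern.periph_name, "da",
                       *          sizeof(p.pattern.periph_pattern.periph_name));
                       *
                       * A DEV_MATCH_BUS or DEV_MATCH_DEVICE pattern would
                       * instead select the EDT traversal.
                       */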
 2755 
 2756                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 2757                         position_type = cdm->pos.position_type;
 2758                 else {
 2759                         u_int i;
 2760 
 2761                         position_type = CAM_DEV_POS_NONE;
 2762 
 2763                         for (i = 0; i < cdm->num_patterns; i++) {
 2764                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 2765                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 2766                                         position_type = CAM_DEV_POS_EDT;
 2767                                         break;
 2768                                 }
 2769                         }
 2770 
 2771                         if (cdm->num_patterns == 0)
 2772                                 position_type = CAM_DEV_POS_EDT;
 2773                         else if (position_type == CAM_DEV_POS_NONE)
 2774                                 position_type = CAM_DEV_POS_PDRV;
 2775                 }
 2776 
 2777                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
 2778                 case CAM_DEV_POS_EDT:
 2779                         xptedtmatch(cdm);
 2780                         break;
 2781                 case CAM_DEV_POS_PDRV:
 2782                         xptperiphlistmatch(cdm);
 2783                         break;
 2784                 default:
 2785                         cdm->status = CAM_DEV_MATCH_ERROR;
 2786                         break;
 2787                 }
 2788 
 2789                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 2790                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2791                 else
 2792                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2793 
 2794                 break;
 2795         }
 2796         case XPT_SASYNC_CB:
 2797         {
 2798                 struct ccb_setasync *csa;
 2799                 struct async_node *cur_entry;
 2800                 struct async_list *async_head;
 2801                 u_int32_t added;
 2802 
 2803                 csa = &start_ccb->csa;
 2804                 added = csa->event_enable;
 2805                 async_head = &path->device->asyncs;
 2806 
 2807                 /*
 2808                  * If there is already an entry for us, simply
 2809                  * update it.
 2810                  */
 2811                 cur_entry = SLIST_FIRST(async_head);
 2812                 while (cur_entry != NULL) {
 2813                         if ((cur_entry->callback_arg == csa->callback_arg)
 2814                          && (cur_entry->callback == csa->callback))
 2815                                 break;
 2816                         cur_entry = SLIST_NEXT(cur_entry, links);
 2817                 }
 2818 
 2819                 if (cur_entry != NULL) {
 2820                         /*
 2821                          * If the request has no flags set,
 2822                          * remove the entry.
 2823                          */
 2824                         added &= ~cur_entry->event_enable;
 2825                         if (csa->event_enable == 0) {
 2826                                 SLIST_REMOVE(async_head, cur_entry,
 2827                                              async_node, links);
 2828                                 xpt_release_device(path->device);
 2829                                 free(cur_entry, M_CAMXPT);
 2830                         } else {
 2831                                 cur_entry->event_enable = csa->event_enable;
 2832                         }
 2833                         csa->event_enable = added;
 2834                 } else {
 2835                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 2836                                            M_NOWAIT);
 2837                         if (cur_entry == NULL) {
 2838                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 2839                                 break;
 2840                         }
 2841                         cur_entry->event_enable = csa->event_enable;
 2842                         cur_entry->event_lock =
 2843                             mtx_owned(path->bus->sim->mtx) ? 1 : 0;
 2844                         cur_entry->callback_arg = csa->callback_arg;
 2845                         cur_entry->callback = csa->callback;
 2846                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 2847                         xpt_acquire_device(path->device);
 2848                 }
 2849                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2850                 break;
 2851         }
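              /*
               * Illustrative note: consumers normally reach this case through
               * the xpt_register_async() convenience wrapper, e.g. to watch
               * for newly discovered devices:
               *
               *      status = xpt_register_async(AC_FOUND_DEVICE,
               *          mydriver_async, softc, NULL);
               *
               * where "mydriver_async" and "softc" are hypothetical names for
               * a driver's ac_callback_t handler and its callback argument.
               */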
 2852         case XPT_REL_SIMQ:
 2853         {
 2854                 struct ccb_relsim *crs;
 2855                 struct cam_ed *dev;
 2856 
 2857                 crs = &start_ccb->crs;
 2858                 dev = path->device;
 2859                 if (dev == NULL) {
 2860 
 2861                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 2862                         break;
 2863                 }
 2864 
 2865                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 2866 
 2867                         /* Don't ever go below one opening */
 2868                         if (crs->openings > 0) {
 2869                                 xpt_dev_ccbq_resize(path, crs->openings);
 2870                                 if (bootverbose) {
 2871                                         xpt_print(path,
 2872                                             "number of openings is now %d\n",
 2873                                             crs->openings);
 2874                                 }
 2875                         }
 2876                 }
 2877 
 2878                 mtx_lock(&dev->sim->devq->send_mtx);
 2879                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 2880 
 2881                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 2882 
 2883                                 /*
 2884                                  * Just extend the old timeout and decrement
 2885                                  * the freeze count so that a single timeout
 2886                                  * is sufficient for releasing the queue.
 2887                                  */
 2888                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2889                                 callout_stop(&dev->callout);
 2890                         } else {
 2891 
 2892                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2893                         }
 2894 
 2895                         callout_reset(&dev->callout,
 2896                             (crs->release_timeout * hz) / 1000,
 2897                             xpt_release_devq_timeout, dev);
 2898 
 2899                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 2900 
 2901                 }
 2902 
 2903                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 2904 
 2905                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 2906                                 /*
 2907                                  * Decrement the freeze count so that a single
 2908                                  * completion is still sufficient to unfreeze
 2909                                  * the queue.
 2910                                  */
 2911                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2912                         } else {
 2913 
 2914                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 2915                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2916                         }
 2917                 }
 2918 
 2919                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 2920 
 2921                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 2922                          || (dev->ccbq.dev_active == 0)) {
 2923 
 2924                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2925                         } else {
 2926 
 2927                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 2928                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2929                         }
 2930                 }
 2931                 mtx_unlock(&dev->sim->devq->send_mtx);
 2932 
 2933                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
 2934                         xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
 2935                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
 2936                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2937                 break;
 2938         }
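              /*
               * Illustrative caller sketch ("new_openings" is a hypothetical
               * variable): a peripheral driver reacting to a queue-full
               * condition might shrink its opening count with:
               *
               *      struct ccb_relsim crs;
               *
               *      xpt_setup_ccb(&crs.ccb_h, periph->path,
               *          CAM_PRIORITY_NORMAL);
               *      crs.ccb_h.func_code = XPT_REL_SIMQ;
               *      crs.release_flags = RELSIM_ADJUST_OPENINGS;
               *      crs.openings = new_openings;
               *      xpt_action((union ccb *)&crs);
               */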
 2939         case XPT_DEBUG: {
 2940                 struct cam_path *oldpath;
 2941 
 2942                 /* Check that all request bits are supported. */
 2943                 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
 2944                         start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 2945                         break;
 2946                 }
 2947 
 2948                 cam_dflags = CAM_DEBUG_NONE;
 2949                 if (cam_dpath != NULL) {
 2950                         oldpath = cam_dpath;
 2951                         cam_dpath = NULL;
 2952                         xpt_free_path(oldpath);
 2953                 }
 2954                 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
 2955                         if (xpt_create_path(&cam_dpath, NULL,
 2956                                             start_ccb->ccb_h.path_id,
 2957                                             start_ccb->ccb_h.target_id,
 2958                                             start_ccb->ccb_h.target_lun) !=
 2959                                             CAM_REQ_CMP) {
 2960                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2961                         } else {
 2962                                 cam_dflags = start_ccb->cdbg.flags;
 2963                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2964                                 xpt_print(cam_dpath, "debugging flags now %x\n",
 2965                                     cam_dflags);
 2966                         }
 2967                 } else
 2968                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2969                 break;
 2970         }
 2971         case XPT_NOOP:
 2972                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 2973                         xpt_freeze_devq(path, 1);
 2974                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2975                 break;
 2976         default:
 2977         case XPT_SDEV_TYPE:
 2978         case XPT_TERM_IO:
 2979         case XPT_ENG_INQ:
 2980                 /* XXX Implement */
 2981                 printf("%s: CCB type %#x not supported\n", __func__,
 2982                        start_ccb->ccb_h.func_code);
 2983                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 2984                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
 2985                         xpt_done(start_ccb);
 2986                 }
 2987                 break;
 2988         }
 2989 }
 2990 
 2991 void
 2992 xpt_polled_action(union ccb *start_ccb)
 2993 {
 2994         u_int32_t timeout;
 2995         struct    cam_sim *sim;
 2996         struct    cam_devq *devq;
 2997         struct    cam_ed *dev;
 2998 
 2999         timeout = start_ccb->ccb_h.timeout * 10;
 3000         sim = start_ccb->ccb_h.path->bus->sim;
 3001         devq = sim->devq;
 3002         dev = start_ccb->ccb_h.path->device;
 3003 
 3004         mtx_unlock(&dev->device_mtx);
 3005 
 3006         /*
 3007          * Steal an opening so that no other queued requests
 3008          * can get it before us while we simulate interrupts.
 3009          */
 3010         mtx_lock(&devq->send_mtx);
 3011         dev->ccbq.devq_openings--;
 3012         dev->ccbq.dev_openings--;
 3013         while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
 3014             (--timeout > 0)) {
 3015                 mtx_unlock(&devq->send_mtx);
 3016                 DELAY(100);
 3017                 CAM_SIM_LOCK(sim);
 3018                 (*(sim->sim_poll))(sim);
 3019                 CAM_SIM_UNLOCK(sim);
 3020                 camisr_runqueue();
 3021                 mtx_lock(&devq->send_mtx);
 3022         }
 3023         dev->ccbq.devq_openings++;
 3024         dev->ccbq.dev_openings++;
 3025         mtx_unlock(&devq->send_mtx);
 3026 
 3027         if (timeout != 0) {
 3028                 xpt_action(start_ccb);
 3029                 while(--timeout > 0) {
 3030                         CAM_SIM_LOCK(sim);
 3031                         (*(sim->sim_poll))(sim);
 3032                         CAM_SIM_UNLOCK(sim);
 3033                         camisr_runqueue();
 3034                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 3035                             != CAM_REQ_INPROG)
 3036                                 break;
 3037                         DELAY(100);
 3038                 }
 3039                 if (timeout == 0) {
 3040                         /*
 3041                          * XXX Is it worth adding a sim_timeout entry
 3042                          * point so we can attempt recovery?  If
 3043                          * this is only used for dumps, I don't think
 3044                          * it is.
 3045                          */
 3046                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3047                 }
 3048         } else {
 3049                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3050         }
 3051 
 3052         mtx_lock(&dev->device_mtx);
 3053 }
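
      /*
       * A minimal usage sketch (not from the original source): polled
       * actions serve contexts where interrupts are unavailable, such as
       * kernel crash dumps.  Assuming "periph" is a held and locked
       * peripheral, a driver might issue a synchronous SYNCHRONIZE CACHE
       * roughly as follows:
       *
       *      union ccb *ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
       *
       *      scsi_synchronize_cache(&ccb->csio, 1, NULL, MSG_SIMPLE_Q_TAG,
       *          0, 0, SSD_FULL_SIZE, 5000);
       *      xpt_polled_action(ccb);
       *      if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
       *              ...handle the error...
       *      xpt_release_ccb(ccb);
       */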
 3054 
 3055 /*
 3056  * Schedule a peripheral driver to receive a ccb when its
 3057  * target device has space for more transactions.
 3058  */
 3059 void
 3060 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
 3061 {
 3062 
 3063         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3064         cam_periph_assert(periph, MA_OWNED);
 3065         if (new_priority < periph->scheduled_priority) {
 3066                 periph->scheduled_priority = new_priority;
 3067                 xpt_run_allocq(periph, 0);
 3068         }
 3069 }
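
      /*
       * Usage sketch (illustrative; "softc" and its bio_queue are assumed
       * fields of a hypothetical peripheral driver): a strategy routine
       * typically queues work and schedules itself, and the XPT later
       * calls the driver's periph_start routine with a freshly allocated
       * CCB:
       *
       *      cam_periph_lock(periph);
       *      bioq_disksort(&softc->bio_queue, bp);
       *      xpt_schedule(periph, CAM_PRIORITY_NORMAL);
       *      cam_periph_unlock(periph);
       */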
 3070 
 3071 
 3072 /*
 3073  * Schedule a device to run on a given queue.
 3074  * If the device was inserted as a new entry on the queue,
 3075  * return 1 meaning the device queue should be run. If we
 3076  * were already queued, implying someone else has already
 3077  * started the queue, return 0 so the caller doesn't attempt
 3078  * to run the queue.
 3079  */
 3080 static int
 3081 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3082                  u_int32_t new_priority)
 3083 {
 3084         int retval;
 3085         u_int32_t old_priority;
 3086 
 3087         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3088 
 3089         old_priority = pinfo->priority;
 3090 
 3091         /*
 3092          * Are we already queued?
 3093          */
 3094         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3095                 /* Simply reorder based on new priority */
 3096                 if (new_priority < old_priority) {
 3097                         camq_change_priority(queue, pinfo->index,
 3098                                              new_priority);
 3099                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3100                                         ("changed priority to %d\n",
 3101                                          new_priority));
 3102                         retval = 1;
 3103                 } else
 3104                         retval = 0;
 3105         } else {
 3106                 /* New entry on the queue */
 3107                 if (new_priority < old_priority)
 3108                         pinfo->priority = new_priority;
 3109 
 3110                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3111                                 ("Inserting onto queue\n"));
 3112                 pinfo->generation = ++queue->generation;
 3113                 camq_insert(queue, pinfo);
 3114                 retval = 1;
 3115         }
 3116         return (retval);
 3117 }
 3118 
 3119 static void
 3120 xpt_run_allocq_task(void *context, int pending)
 3121 {
 3122         struct cam_periph *periph = context;
 3123 
 3124         cam_periph_lock(periph);
 3125         periph->flags &= ~CAM_PERIPH_RUN_TASK;
 3126         xpt_run_allocq(periph, 1);
 3127         cam_periph_unlock(periph);
 3128         cam_periph_release(periph);
 3129 }
 3130 
 3131 static void
 3132 xpt_run_allocq(struct cam_periph *periph, int sleep)
 3133 {
 3134         struct cam_ed   *device;
 3135         union ccb       *ccb;
 3136         uint32_t         prio;
 3137 
 3138         cam_periph_assert(periph, MA_OWNED);
 3139         if (periph->periph_allocating)
 3140                 return;
 3141         periph->periph_allocating = 1;
 3142         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
 3143         device = periph->path->device;
 3144         ccb = NULL;
 3145 restart:
 3146         while ((prio = min(periph->scheduled_priority,
 3147             periph->immediate_priority)) != CAM_PRIORITY_NONE &&
 3148             (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
 3149              device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
 3150 
 3151                 if (ccb == NULL &&
 3152                     (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
 3153                         if (sleep) {
 3154                                 ccb = xpt_get_ccb(periph);
 3155                                 goto restart;
 3156                         }
 3157                         if (periph->flags & CAM_PERIPH_RUN_TASK)
 3158                                 break;
 3159                         cam_periph_doacquire(periph);
 3160                         periph->flags |= CAM_PERIPH_RUN_TASK;
 3161                         taskqueue_enqueue(xsoftc.xpt_taskq,
 3162                             &periph->periph_run_task);
 3163                         break;
 3164                 }
 3165                 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
 3166                 if (prio == periph->immediate_priority) {
 3167                         periph->immediate_priority = CAM_PRIORITY_NONE;
 3168                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3169                                         ("waking cam_periph_getccb()\n"));
 3170                         SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
 3171                                           periph_links.sle);
 3172                         wakeup(&periph->ccb_list);
 3173                 } else {
 3174                         periph->scheduled_priority = CAM_PRIORITY_NONE;
 3175                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3176                                         ("calling periph_start()\n"));
 3177                         periph->periph_start(periph, ccb);
 3178                 }
 3179                 ccb = NULL;
 3180         }
 3181         if (ccb != NULL)
 3182                 xpt_release_ccb(ccb);
 3183         periph->periph_allocating = 0;
 3184 }
 3185 
 3186 static void
 3187 xpt_run_devq(struct cam_devq *devq)
 3188 {
 3189         char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 3190         int lock;
 3191 
 3192         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
 3193 
 3194         devq->send_queue.qfrozen_cnt++;
 3195         while ((devq->send_queue.entries > 0)
 3196             && (devq->send_openings > 0)
 3197             && (devq->send_queue.qfrozen_cnt <= 1)) {
 3198                 struct  cam_ed *device;
 3199                 union ccb *work_ccb;
 3200                 struct  cam_sim *sim;
 3201 
 3202                 device = (struct cam_ed *)camq_remove(&devq->send_queue,
 3203                                                            CAMQ_HEAD);
 3204                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3205                                 ("running device %p\n", device));
 3206 
 3207                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3208                 if (work_ccb == NULL) {
 3209                         printf("device on run queue with no ccbs???\n");
 3210                         continue;
 3211                 }
 3212 
 3213                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3214 
 3215                         mtx_lock(&xsoftc.xpt_highpower_lock);
 3216                         if (xsoftc.num_highpower <= 0) {
 3217                                 /*
 3218                                  * We got a high power command, but we
 3219                                  * don't have any available slots.  Freeze
 3220                                  * the device queue until we have a slot
 3221                                  * available.
 3222                                  */
 3223                                 xpt_freeze_devq_device(device, 1);
 3224                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
 3225                                                    highpowerq_entry);
 3226 
 3227                                 mtx_unlock(&xsoftc.xpt_highpower_lock);
 3228                                 continue;
 3229                         } else {
 3230                                 /*
 3231                                  * Consume a high power slot while
 3232                                  * this ccb runs.
 3233                                  */
 3234                                 xsoftc.num_highpower--;
 3235                         }
 3236                         mtx_unlock(&xsoftc.xpt_highpower_lock);
 3237                 }
 3238                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3239                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3240                 devq->send_openings--;
 3241                 devq->send_active++;
 3242                 xpt_schedule_devq(devq, device);
 3243                 mtx_unlock(&devq->send_mtx);
 3244 
 3245                 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
 3246                         /*
 3247                          * The client wants to freeze the queue
 3248                          * after this CCB is sent.
 3249                          */
 3250                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3251                 }
 3252 
 3253                 /* In Target mode, the peripheral driver knows best... */
 3254                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3255                         if ((device->inq_flags & SID_CmdQue) != 0
 3256                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3257                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3258                         else
 3259                                 /*
 3260                                  * Clear this in case of a retried CCB that
 3261                                  * failed due to a rejected tag.
 3262                                  */
 3263                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3264                 }
 3265 
 3266                 switch (work_ccb->ccb_h.func_code) {
 3267                 case XPT_SCSI_IO:
 3268                         CAM_DEBUG(work_ccb->ccb_h.path,
 3269                             CAM_DEBUG_CDB,("%s. CDB: %s\n",
 3270                              scsi_op_desc(work_ccb->csio.cdb_io.cdb_bytes[0],
 3271                                           &device->inq_data),
 3272                              scsi_cdb_string(work_ccb->csio.cdb_io.cdb_bytes,
 3273                                              cdb_str, sizeof(cdb_str))));
 3274                         break;
 3275                 case XPT_ATA_IO:
 3276                         CAM_DEBUG(work_ccb->ccb_h.path,
 3277                             CAM_DEBUG_CDB,("%s. ACB: %s\n",
 3278                              ata_op_string(&work_ccb->ataio.cmd),
 3279                              ata_cmd_string(&work_ccb->ataio.cmd,
 3280                                             cdb_str, sizeof(cdb_str))));
 3281                         break;
 3282                 default:
 3283                         break;
 3284                 }
 3285 
 3286                 /*
 3287                  * Device queues can be shared among multiple SIM instances
 3288                  * that reside on different busses.  Use the SIM from the
 3289                  * queued device, rather than the one from the calling bus.
 3290                  */
 3291                 sim = device->sim;
 3292                 lock = (mtx_owned(sim->mtx) == 0);
 3293                 if (lock)
 3294                         CAM_SIM_LOCK(sim);
 3295                 (*(sim->sim_action))(sim, work_ccb);
 3296                 if (lock)
 3297                         CAM_SIM_UNLOCK(sim);
 3298                 mtx_lock(&devq->send_mtx);
 3299         }
 3300         devq->send_queue.qfrozen_cnt--;
 3301 }
 3302 
 3303 /*
 3304  * This function merges fields from the slave ccb into the master ccb, while
 3305  * keeping important fields in the master ccb constant.
 3306  */
 3307 void
 3308 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3309 {
 3310 
 3311         /*
 3312          * Pull fields that are valid for peripheral drivers to set
 3313          * into the master CCB along with the CCB "payload".
 3314          */
 3315         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3316         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3317         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3318         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 3319         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3320               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3321 }
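
      /*
       * Illustrative sketch: a caller that has prepared a request in a
       * temporary, stack-allocated CCB can graft it onto a CCB that is
       * already bound to a path:
       *
       *      union ccb tmp_ccb;
       *
       *      xpt_setup_ccb(&tmp_ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
       *      ...fill in the request in tmp_ccb...
       *      xpt_merge_ccb(start_ccb, &tmp_ccb);
       *
       * Only the retry count, function code, timeout, flags, and payload
       * are copied; the master's path and queueing state remain intact.
       */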
 3322 
 3323 void
 3324 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3325 {
 3326 
 3327         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3328         ccb_h->pinfo.priority = priority;
 3329         ccb_h->path = path;
 3330         ccb_h->path_id = path->bus->path_id;
 3331         if (path->target)
 3332                 ccb_h->target_id = path->target->target_id;
 3333         else
 3334                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3335         if (path->device) {
 3336                 ccb_h->target_lun = path->device->lun_id;
 3337                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3338         } else {
 3339                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3340         }
 3341         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3342         ccb_h->flags = 0;
 3343         ccb_h->xflags = 0;
 3344 }
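
      /*
       * A minimal sketch of the canonical call sequence (not part of the
       * original source): every CCB is initialized against a path before
       * being handed to xpt_action().  For example, fetching the inquiry
       * data cached by the XPT:
       *
       *      struct ccb_getdev cgd;
       *
       *      xpt_setup_ccb(&cgd.ccb_h, path, CAM_PRIORITY_NORMAL);
       *      cgd.ccb_h.func_code = XPT_GDEV_TYPE;
       *      xpt_action((union ccb *)&cgd);
       *      if (cgd.ccb_h.status == CAM_REQ_CMP)
       *              ...cgd.inq_data and cgd.ident_data are now valid...
       */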
 3345 
 3346 /* Path manipulation functions */
 3347 cam_status
 3348 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3349                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3350 {
 3351         struct     cam_path *path;
 3352         cam_status status;
 3353 
 3354         path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
 3355 
 3356         if (path == NULL) {
 3357                 status = CAM_RESRC_UNAVAIL;
 3358                 return(status);
 3359         }
 3360         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3361         if (status != CAM_REQ_CMP) {
 3362                 free(path, M_CAMPATH);
 3363                 path = NULL;
 3364         }
 3365         *new_path_ptr = path;
 3366         return (status);
 3367 }
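
      /*
       * Typical call pattern (illustrative): check the returned status
       * and eventually balance a successful allocation with
       * xpt_free_path():
       *
       *      struct cam_path *path;
       *
       *      if (xpt_create_path(&path, NULL, path_id, target_id,
       *          lun_id) != CAM_REQ_CMP)
       *              return (ENOMEM);
       *      ...use path...
       *      xpt_free_path(path);
       */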
 3368 
 3369 cam_status
 3370 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
 3371                          struct cam_periph *periph, path_id_t path_id,
 3372                          target_id_t target_id, lun_id_t lun_id)
 3373 {
 3374 
 3375         return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
 3376             lun_id));
 3377 }
 3378 
 3379 cam_status
 3380 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 3381                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3382 {
 3383         struct       cam_eb *bus;
 3384         struct       cam_et *target;
 3385         struct       cam_ed *device;
 3386         cam_status   status;
 3387 
 3388         status = CAM_REQ_CMP;   /* Completed without error */
 3389         target = NULL;          /* Wildcarded */
 3390         device = NULL;          /* Wildcarded */
 3391 
 3392         /*
 3393          * We will potentially modify the EDT, so lock out others
 3394          * that may attempt to create cam paths concurrently.
 3395          */
 3396         bus = xpt_find_bus(path_id);
 3397         if (bus == NULL) {
 3398                 status = CAM_PATH_INVALID;
 3399         } else {
 3400                 xpt_lock_buses();
 3401                 mtx_lock(&bus->eb_mtx);
 3402                 target = xpt_find_target(bus, target_id);
 3403                 if (target == NULL) {
 3404                         /* Create one */
 3405                         struct cam_et *new_target;
 3406 
 3407                         new_target = xpt_alloc_target(bus, target_id);
 3408                         if (new_target == NULL) {
 3409                                 status = CAM_RESRC_UNAVAIL;
 3410                         } else {
 3411                                 target = new_target;
 3412                         }
 3413                 }
 3414                 xpt_unlock_buses();
 3415                 if (target != NULL) {
 3416                         device = xpt_find_device(target, lun_id);
 3417                         if (device == NULL) {
 3418                                 /* Create one */
 3419                                 struct cam_ed *new_device;
 3420 
 3421                                 new_device =
 3422                                     (*(bus->xport->alloc_device))(bus,
 3423                                                                       target,
 3424                                                                       lun_id);
 3425                                 if (new_device == NULL) {
 3426                                         status = CAM_RESRC_UNAVAIL;
 3427                                 } else {
 3428                                         device = new_device;
 3429                                 }
 3430                         }
 3431                 }
 3432                 mtx_unlock(&bus->eb_mtx);
 3433         }
 3434 
 3435         /*
 3436          * Only touch the user's data if we are successful.
 3437          */
 3438         if (status == CAM_REQ_CMP) {
 3439                 new_path->periph = perph;
 3440                 new_path->bus = bus;
 3441                 new_path->target = target;
 3442                 new_path->device = device;
 3443                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 3444         } else {
 3445                 if (device != NULL)
 3446                         xpt_release_device(device);
 3447                 if (target != NULL)
 3448                         xpt_release_target(target);
 3449                 if (bus != NULL)
 3450                         xpt_release_bus(bus);
 3451         }
 3452         return (status);
 3453 }
 3454 
 3455 cam_status
 3456 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
 3457 {
 3458         struct     cam_path *new_path;
 3459 
 3460         new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
 3461         if (new_path == NULL)
 3462                 return(CAM_RESRC_UNAVAIL);
 3463         xpt_copy_path(new_path, path);
 3464         *new_path_ptr = new_path;
 3465         return (CAM_REQ_CMP);
 3466 }
 3467 
 3468 void
 3469 xpt_copy_path(struct cam_path *new_path, struct cam_path *path)
 3470 {
 3471 
 3472         *new_path = *path;
 3473         if (path->bus != NULL)
 3474                 xpt_acquire_bus(path->bus);
 3475         if (path->target != NULL)
 3476                 xpt_acquire_target(path->target);
 3477         if (path->device != NULL)
 3478                 xpt_acquire_device(path->device);
 3479 }
 3480 
 3481 void
 3482 xpt_release_path(struct cam_path *path)
 3483 {
 3484         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 3485         if (path->device != NULL) {
 3486                 xpt_release_device(path->device);
 3487                 path->device = NULL;
 3488         }
 3489         if (path->target != NULL) {
 3490                 xpt_release_target(path->target);
 3491                 path->target = NULL;
 3492         }
 3493         if (path->bus != NULL) {
 3494                 xpt_release_bus(path->bus);
 3495                 path->bus = NULL;
 3496         }
 3497 }
 3498 
 3499 void
 3500 xpt_free_path(struct cam_path *path)
 3501 {
 3502 
 3503         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 3504         xpt_release_path(path);
 3505         free(path, M_CAMPATH);
 3506 }
 3507 
 3508 void
 3509 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
 3510     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
 3511 {
 3512 
 3513         xpt_lock_buses();
 3514         if (bus_ref) {
 3515                 if (path->bus)
 3516                         *bus_ref = path->bus->refcount;
 3517                 else
 3518                         *bus_ref = 0;
 3519         }
 3520         if (periph_ref) {
 3521                 if (path->periph)
 3522                         *periph_ref = path->periph->refcount;
 3523                 else
 3524                         *periph_ref = 0;
 3525         }
 3526         xpt_unlock_buses();
 3527         if (target_ref) {
 3528                 if (path->target)
 3529                         *target_ref = path->target->refcount;
 3530                 else
 3531                         *target_ref = 0;
 3532         }
 3533         if (device_ref) {
 3534                 if (path->device)
 3535                         *device_ref = path->device->refcount;
 3536                 else
 3537                         *device_ref = 0;
 3538         }
 3539 }
 3540 
 3541 /*
 3542  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 3543  * in path1, 2 for match with wildcards in path2.
 3544  */
 3545 int
 3546 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 3547 {
 3548         int retval = 0;
 3549 
 3550         if (path1->bus != path2->bus) {
 3551                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 3552                         retval = 1;
 3553                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 3554                         retval = 2;
 3555                 else
 3556                         return (-1);
 3557         }
 3558         if (path1->target != path2->target) {
 3559                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 3560                         if (retval == 0)
 3561                                 retval = 1;
 3562                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 3563                         retval = 2;
 3564                 else
 3565                         return (-1);
 3566         }
 3567         if (path1->device != path2->device) {
 3568                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 3569                         if (retval == 0)
 3570                                 retval = 1;
 3571                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 3572                         retval = 2;
 3573                 else
 3574                         return (-1);
 3575         }
 3576         return (retval);
 3577 }
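
      /*
       * Worked example (illustrative): comparing a fully specified path
       * for (bus 0, target 1, lun 0) against a wildcard path for (bus 0,
       * any target, any lun) returns 2, since the wildcards are in
       * path2; the reverse comparison returns 1; and two non-wildcard
       * paths on different busses compare as -1.
       */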
 3578 
 3579 int
 3580 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
 3581 {
 3582         int retval = 0;
 3583 
 3584         if (path->bus != dev->target->bus) {
 3585                 if (path->bus->path_id == CAM_BUS_WILDCARD)
 3586                         retval = 1;
 3587                 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
 3588                         retval = 2;
 3589                 else
 3590                         return (-1);
 3591         }
 3592         if (path->target != dev->target) {
 3593                 if (path->target->target_id == CAM_TARGET_WILDCARD) {
 3594                         if (retval == 0)
 3595                                 retval = 1;
 3596                 } else if (dev->target->target_id == CAM_TARGET_WILDCARD)
 3597                         retval = 2;
 3598                 else
 3599                         return (-1);
 3600         }
 3601         if (path->device != dev) {
 3602                 if (path->device->lun_id == CAM_LUN_WILDCARD) {
 3603                         if (retval == 0)
 3604                                 retval = 1;
 3605                 } else if (dev->lun_id == CAM_LUN_WILDCARD)
 3606                         retval = 2;
 3607                 else
 3608                         return (-1);
 3609         }
 3610         return (retval);
 3611 }
 3612 
 3613 void
 3614 xpt_print_path(struct cam_path *path)
 3615 {
 3616 
 3617         if (path == NULL)
 3618                 printf("(nopath): ");
 3619         else {
 3620                 if (path->periph != NULL)
 3621                         printf("(%s%d:", path->periph->periph_name,
 3622                                path->periph->unit_number);
 3623                 else
 3624                         printf("(noperiph:");
 3625 
 3626                 if (path->bus != NULL)
 3627                         printf("%s%d:%d:", path->bus->sim->sim_name,
 3628                                path->bus->sim->unit_number,
 3629                                path->bus->sim->bus_id);
 3630                 else
 3631                         printf("nobus:");
 3632 
 3633                 if (path->target != NULL)
 3634                         printf("%d:", path->target->target_id);
 3635                 else
 3636                         printf("X:");
 3637 
 3638                 if (path->device != NULL)
 3639                         printf("%jx): ", (uintmax_t)path->device->lun_id);
 3640                 else
 3641                         printf("X): ");
 3642         }
 3643 }
 3644 
 3645 void
 3646 xpt_print_device(struct cam_ed *device)
 3647 {
 3648 
 3649         if (device == NULL)
 3650                 printf("(nopath): ");
 3651         else {
 3652                 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
 3653                        device->sim->unit_number,
 3654                        device->sim->bus_id,
 3655                        device->target->target_id,
 3656                        (uintmax_t)device->lun_id);
 3657         }
 3658 }
 3659 
 3660 void
 3661 xpt_print(struct cam_path *path, const char *fmt, ...)
 3662 {
 3663         va_list ap;
 3664         xpt_print_path(path);
 3665         va_start(ap, fmt);
 3666         vprintf(fmt, ap);
 3667         va_end(ap);
 3668 }
 3669 
 3670 int
 3671 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 3672 {
 3673         struct sbuf sb;
 3674 
 3675         sbuf_new(&sb, str, str_len, 0);
 3676 
 3677         if (path == NULL)
 3678                 sbuf_printf(&sb, "(nopath): ");
 3679         else {
 3680                 if (path->periph != NULL)
 3681                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 3682                                     path->periph->unit_number);
 3683                 else
 3684                         sbuf_printf(&sb, "(noperiph:");
 3685 
 3686                 if (path->bus != NULL)
 3687                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 3688                                     path->bus->sim->unit_number,
 3689                                     path->bus->sim->bus_id);
 3690                 else
 3691                         sbuf_printf(&sb, "nobus:");
 3692 
 3693                 if (path->target != NULL)
 3694                         sbuf_printf(&sb, "%d:", path->target->target_id);
 3695                 else
 3696                         sbuf_printf(&sb, "X:");
 3697 
 3698                 if (path->device != NULL)
 3699                         sbuf_printf(&sb, "%jx): ",
 3700                             (uintmax_t)path->device->lun_id);
 3701                 else
 3702                         sbuf_printf(&sb, "X): ");
 3703         }
 3704         sbuf_finish(&sb);
 3705 
 3706         return(sbuf_len(&sb));
 3707 }
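
      /*
       * Illustrative use (hypothetical buffer size): format a path into
       * a caller-supplied buffer.  The result already ends with "): ",
       * so no separator is needed:
       *
       *      char str[64];
       *
       *      xpt_path_string(path, str, sizeof(str));
       *      printf("%sdevice ready\n", str);
       */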
 3708 
 3709 path_id_t
 3710 xpt_path_path_id(struct cam_path *path)
 3711 {
 3712         return(path->bus->path_id);
 3713 }
 3714 
 3715 target_id_t
 3716 xpt_path_target_id(struct cam_path *path)
 3717 {
 3718         if (path->target != NULL)
 3719                 return (path->target->target_id);
 3720         else
 3721                 return (CAM_TARGET_WILDCARD);
 3722 }
 3723 
 3724 lun_id_t
 3725 xpt_path_lun_id(struct cam_path *path)
 3726 {
 3727         if (path->device != NULL)
 3728                 return (path->device->lun_id);
 3729         else
 3730                 return (CAM_LUN_WILDCARD);
 3731 }
 3732 
 3733 struct cam_sim *
 3734 xpt_path_sim(struct cam_path *path)
 3735 {
 3736 
 3737         return (path->bus->sim);
 3738 }
 3739 
 3740 struct cam_periph*
 3741 xpt_path_periph(struct cam_path *path)
 3742 {
 3743 
 3744         return (path->periph);
 3745 }
 3746 
 3747 int
 3748 xpt_path_legacy_ata_id(struct cam_path *path)
 3749 {
 3750         struct cam_eb *bus;
 3751         int bus_id;
 3752 
 3753         if ((strcmp(path->bus->sim->sim_name, "ata") != 0) &&
 3754             strcmp(path->bus->sim->sim_name, "ahcich") != 0 &&
 3755             strcmp(path->bus->sim->sim_name, "mvsch") != 0 &&
 3756             strcmp(path->bus->sim->sim_name, "siisch") != 0)
 3757                 return (-1);
 3758 
 3759         if (strcmp(path->bus->sim->sim_name, "ata") == 0 &&
 3760             path->bus->sim->unit_number < 2) {
 3761                 bus_id = path->bus->sim->unit_number;
 3762         } else {
 3763                 bus_id = 2;
 3764                 xpt_lock_buses();
 3765                 TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
 3766                         if (bus == path->bus)
 3767                                 break;
 3768                         if ((strcmp(bus->sim->sim_name, "ata") == 0 &&
 3769                              bus->sim->unit_number >= 2) ||
 3770                             strcmp(bus->sim->sim_name, "ahcich") == 0 ||
 3771                             strcmp(bus->sim->sim_name, "mvsch") == 0 ||
 3772                             strcmp(bus->sim->sim_name, "siisch") == 0)
 3773                                 bus_id++;
 3774                 }
 3775                 xpt_unlock_buses();
 3776         }
 3777         if (path->target != NULL) {
 3778                 if (path->target->target_id < 2)
 3779                         return (bus_id * 2 + path->target->target_id);
 3780                 else
 3781                         return (-1);
 3782         } else
 3783                 return (bus_id * 2);
 3784 }
 3785 
 3786 /*
 3787  * Release a CAM control block for the caller.  Remit the cost of the structure
 3788  * to the device referenced by the path.  If this device had no 'credits'
 3789  * and peripheral drivers have registered async callbacks for this
 3790  * notification, call them now.
 3791  */
 3792 void
 3793 xpt_release_ccb(union ccb *free_ccb)
 3794 {
 3795         struct   cam_ed *device;
 3796         struct   cam_periph *periph;
 3797 
 3798         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 3799         xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
 3800         device = free_ccb->ccb_h.path->device;
 3801         periph = free_ccb->ccb_h.path->periph;
 3802 
 3803         xpt_free_ccb(free_ccb);
 3804         periph->periph_allocated--;
 3805         cam_ccbq_release_opening(&device->ccbq);
 3806         xpt_run_allocq(periph, 0);
 3807 }
 3808 
 3809 /* Functions accessed by SIM drivers */
 3810 
 3811 static struct xpt_xport xport_default = {
 3812         .alloc_device = xpt_alloc_device_default,
 3813         .action = xpt_action_default,
 3814         .async = xpt_dev_async_default,
 3815 };
 3816 
 3817 /*
 3818  * A sim structure, listing the SIM entry points and instance
 3819  * identification info, is passed to xpt_bus_register to hook the SIM
 3820  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 3821  * for this new bus and places it in the array of busses and assigns
 3822  * it a path_id.  The path_id may be influenced by "hard wiring"
 3823  * information specified by the user.  Once interrupt services are
 3824  * available, the bus will be probed.
 3825  */
 3826 int32_t
 3827 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
 3828 {
 3829         struct cam_eb *new_bus;
 3830         struct cam_eb *old_bus;
 3831         struct ccb_pathinq cpi;
 3832         struct cam_path *path;
 3833         cam_status status;
 3834 
 3835         mtx_assert(sim->mtx, MA_OWNED);
 3836 
 3837         sim->bus_id = bus;
 3838         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 3839                                           M_CAMXPT, M_NOWAIT|M_ZERO);
 3840         if (new_bus == NULL) {
 3841                 /* Couldn't satisfy request */
 3842                 return (CAM_RESRC_UNAVAIL);
 3843         }
 3844 
 3845         mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
 3846         TAILQ_INIT(&new_bus->et_entries);
 3847         cam_sim_hold(sim);
 3848         new_bus->sim = sim;
 3849         timevalclear(&new_bus->last_reset);
 3850         new_bus->flags = 0;
 3851         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 3852         new_bus->generation = 0;
 3853 
 3854         xpt_lock_buses();
 3855         sim->path_id = new_bus->path_id =
 3856             xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 3857         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3858         while (old_bus != NULL
 3859             && old_bus->path_id < new_bus->path_id)
 3860                 old_bus = TAILQ_NEXT(old_bus, links);
 3861         if (old_bus != NULL)
 3862                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 3863         else
 3864                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
 3865         xsoftc.bus_generation++;
 3866         xpt_unlock_buses();
 3867 
 3868         /*
 3869          * Set a default transport so that a PATH_INQ can be issued to
 3870          * the SIM.  This will then allow for probing and attaching of
 3871          * a more appropriate transport.
 3872          */
 3873         new_bus->xport = &xport_default;
 3874 
 3875         status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
 3876                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3877         if (status != CAM_REQ_CMP) {
 3878                 xpt_release_bus(new_bus);
 3879                 free(path, M_CAMXPT);
 3880                 return (CAM_RESRC_UNAVAIL);
 3881         }
 3882 
 3883         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 3884         cpi.ccb_h.func_code = XPT_PATH_INQ;
 3885         xpt_action((union ccb *)&cpi);
 3886 
 3887         if (cpi.ccb_h.status == CAM_REQ_CMP) {
 3888                 switch (cpi.transport) {
 3889                 case XPORT_SPI:
 3890                 case XPORT_SAS:
 3891                 case XPORT_FC:
 3892                 case XPORT_USB:
 3893                 case XPORT_ISCSI:
 3894                 case XPORT_SRP:
 3895                 case XPORT_PPB:
 3896                         new_bus->xport = scsi_get_xport();
 3897                         break;
 3898                 case XPORT_ATA:
 3899                 case XPORT_SATA:
 3900                         new_bus->xport = ata_get_xport();
 3901                         break;
 3902                 default:
 3903                         new_bus->xport = &xport_default;
 3904                         break;
 3905                 }
 3906         }
 3907 
 3908         /* Notify interested parties */
 3909         if (sim->path_id != CAM_XPT_PATH_ID) {
 3910 
 3911                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
 3912                 if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
 3913                         union   ccb *scan_ccb;
 3914 
 3915                         /* Initiate bus rescan. */
 3916                         scan_ccb = xpt_alloc_ccb_nowait();
 3917                         if (scan_ccb != NULL) {
 3918                                 scan_ccb->ccb_h.path = path;
 3919                                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
 3920                                 scan_ccb->crcn.flags = 0;
 3921                                 xpt_rescan(scan_ccb);
 3922                         } else {
 3923                                 xpt_print(path,
 3924                                           "Can't allocate CCB to scan bus\n");
 3925                                 xpt_free_path(path);
 3926                         }
 3927                 } else
 3928                         xpt_free_path(path);
 3929         } else
 3930                 xpt_free_path(path);
 3931         return (CAM_SUCCESS);
 3932 }
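
/*
 * Illustrative sketch, not part of the original file: a SIM driver
 * typically registers its bus from its attach routine while holding the
 * lock it passed to cam_sim_alloc(), since xpt_bus_register() asserts
 * that lock above.  The softc and the names "sc", "sc->mtx", "sc->sim"
 * and "sc->dev" are hypothetical:
 *
 *        mtx_lock(&sc->mtx);
 *        if (xpt_bus_register(sc->sim, sc->dev, 0) != CAM_SUCCESS) {
 *                cam_sim_free(sc->sim, TRUE);
 *                mtx_unlock(&sc->mtx);
 *                return (ENXIO);
 *        }
 *        mtx_unlock(&sc->mtx);
 *
 * The matching teardown is xpt_bus_deregister(cam_sim_path(sc->sim)),
 * issued before the sim itself is freed.
 */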
 3933 
 3934 int32_t
 3935 xpt_bus_deregister(path_id_t pathid)
 3936 {
 3937         struct cam_path bus_path;
 3938         cam_status status;
 3939 
 3940         status = xpt_compile_path(&bus_path, NULL, pathid,
 3941                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3942         if (status != CAM_REQ_CMP)
 3943                 return (status);
 3944 
 3945         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 3946         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 3947 
 3948         /* Release the reference count held while registered. */
 3949         xpt_release_bus(bus_path.bus);
 3950         xpt_release_path(&bus_path);
 3951 
 3952         return (CAM_REQ_CMP);
 3953 }
 3954 
 3955 static path_id_t
 3956 xptnextfreepathid(void)
 3957 {
 3958         struct cam_eb *bus;
 3959         path_id_t pathid;
 3960         const char *strval;
 3961 
 3962         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
 3963         pathid = 0;
 3964         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3965 retry:
 3966         /* Find an unoccupied pathid */
 3967         while (bus != NULL && bus->path_id <= pathid) {
 3968                 if (bus->path_id == pathid)
 3969                         pathid++;
 3970                 bus = TAILQ_NEXT(bus, links);
 3971         }
 3972 
 3973         /*
 3974          * Ensure that this pathid is not reserved for
 3975          * a bus that may be registered in the future.
 3976          */
 3977         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 3978                 ++pathid;
 3979                 /* Start the search over */
 3980                 goto retry;
 3981         }
 3982         return (pathid);
 3983 }
 3984 
 3985 static path_id_t
 3986 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 3987 {
 3988         path_id_t pathid;
 3989         int i, dunit, val;
 3990         char buf[32];
 3991         const char *dname;
 3992 
 3993         pathid = CAM_XPT_PATH_ID;
 3994         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 3995         if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
 3996                 return (pathid);
 3997         i = 0;
 3998         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 3999                 if (strcmp(dname, "scbus")) {
 4000                         /* Avoid a bit of foot shooting. */
 4001                         continue;
 4002                 }
 4003                 if (dunit < 0)          /* unwired?! */
 4004                         continue;
 4005                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 4006                         if (sim_bus == val) {
 4007                                 pathid = dunit;
 4008                                 break;
 4009                         }
 4010                 } else if (sim_bus == 0) {
 4011                         /* Unspecified matches bus 0 */
 4012                         pathid = dunit;
 4013                         break;
 4014                 } else {
 4015                         printf("Ambiguous scbus configuration for %s%d "
 4016                                "bus %d, cannot wire down.  The kernel "
 4017                                "config entry for scbus%d should "
 4018                                "specify a controller bus.\n"
 4019                                "Scbus will be assigned dynamically.\n",
 4020                                sim_name, sim_unit, sim_bus, dunit);
 4021                         break;
 4022                 }
 4023         }
 4024 
 4025         if (pathid == CAM_XPT_PATH_ID)
 4026                 pathid = xptnextfreepathid();
 4027         return (pathid);
 4028 }
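
/*
 * Illustrative sketch, not part of the original file: the "hard wiring"
 * consulted above comes from kernel resource hints.  With a hypothetical
 * controller "ahc0", the following /boot/device.hints entries wire
 * scbus0 to bus 0 of that controller, so xptpathid() returns 0 for the
 * matching SIM and xptnextfreepathid() skips 0 for everyone else:
 *
 *        hint.scbus.0.at="ahc0"
 *        hint.scbus.0.bus="0"
 */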
 4029 
 4030 static const char *
 4031 xpt_async_string(u_int32_t async_code)
 4032 {
 4033 
 4034         switch (async_code) {
 4035         case AC_BUS_RESET: return ("AC_BUS_RESET");
 4036         case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
 4037         case AC_SCSI_AEN: return ("AC_SCSI_AEN");
 4038         case AC_SENT_BDR: return ("AC_SENT_BDR");
 4039         case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
 4040         case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
 4041         case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
 4042         case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
 4043         case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
 4044         case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
 4045         case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
 4046         case AC_CONTRACT: return ("AC_CONTRACT");
 4047         case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
 4048         case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
 4049         }
 4050         return ("AC_UNKNOWN");
 4051 }
 4052 
 4053 static int
 4054 xpt_async_size(u_int32_t async_code)
 4055 {
 4056 
 4057         switch (async_code) {
 4058         case AC_BUS_RESET: return (0);
 4059         case AC_UNSOL_RESEL: return (0);
 4060         case AC_SCSI_AEN: return (0);
 4061         case AC_SENT_BDR: return (0);
 4062         case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
 4063         case AC_PATH_DEREGISTERED: return (0);
 4064         case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
 4065         case AC_LOST_DEVICE: return (0);
 4066         case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
 4067         case AC_INQ_CHANGED: return (0);
 4068         case AC_GETDEV_CHANGED: return (0);
 4069         case AC_CONTRACT: return (sizeof(struct ac_contract));
 4070         case AC_ADVINFO_CHANGED: return (-1);
 4071         case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
 4072         }
 4073         return (0);
 4074 }
 4075 
 4076 static int
 4077 xpt_async_process_dev(struct cam_ed *device, void *arg)
 4078 {
 4079         union ccb *ccb = arg;
 4080         struct cam_path *path = ccb->ccb_h.path;
 4081         void *async_arg = ccb->casync.async_arg_ptr;
 4082         u_int32_t async_code = ccb->casync.async_code;
 4083         int relock;
 4084 
 4085         if (path->device != device
 4086          && path->device->lun_id != CAM_LUN_WILDCARD
 4087          && device->lun_id != CAM_LUN_WILDCARD)
 4088                 return (1);
 4089 
 4090         /*
 4091          * The async callback could free the device.
 4092          * If it is a broadcast async, it doesn't hold a
 4093          * device reference, so take our own reference.
 4094          */
 4095         xpt_acquire_device(device);
 4096 
 4097         /*
 4098          * If an async for a specific device is to be delivered to
 4099          * a wildcard client, take the specific device's lock.
 4100          * XXX: We may need a way for the client to specify this.
 4101          */
 4102         if ((device->lun_id == CAM_LUN_WILDCARD &&
 4103              path->device->lun_id != CAM_LUN_WILDCARD) ||
 4104             (device->target->target_id == CAM_TARGET_WILDCARD &&
 4105              path->target->target_id != CAM_TARGET_WILDCARD) ||
 4106             (device->target->bus->path_id == CAM_BUS_WILDCARD &&
 4107              path->target->bus->path_id != CAM_BUS_WILDCARD)) {
 4108                 mtx_unlock(&device->device_mtx);
 4109                 xpt_path_lock(path);
 4110                 relock = 1;
 4111         } else
 4112                 relock = 0;
 4113 
 4114         (*(device->target->bus->xport->async))(async_code,
 4115             device->target->bus, device->target, device, async_arg);
 4116         xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
 4117 
 4118         if (relock) {
 4119                 xpt_path_unlock(path);
 4120                 mtx_lock(&device->device_mtx);
 4121         }
 4122         xpt_release_device(device);
 4123         return (1);
 4124 }
 4125 
 4126 static int
 4127 xpt_async_process_tgt(struct cam_et *target, void *arg)
 4128 {
 4129         union ccb *ccb = arg;
 4130         struct cam_path *path = ccb->ccb_h.path;
 4131 
 4132         if (path->target != target
 4133          && path->target->target_id != CAM_TARGET_WILDCARD
 4134          && target->target_id != CAM_TARGET_WILDCARD)
 4135                 return (1);
 4136 
 4137         if (ccb->casync.async_code == AC_SENT_BDR) {
 4138                 /* Update our notion of when the last reset occurred */
 4139                 microtime(&target->last_reset);
 4140         }
 4141 
 4142         return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
 4143 }
 4144 
 4145 static void
 4146 xpt_async_process(struct cam_periph *periph, union ccb *ccb)
 4147 {
 4148         struct cam_eb *bus;
 4149         struct cam_path *path;
 4150         void *async_arg;
 4151         u_int32_t async_code;
 4152 
 4153         path = ccb->ccb_h.path;
 4154         async_code = ccb->casync.async_code;
 4155         async_arg = ccb->casync.async_arg_ptr;
 4156         CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
 4157             ("xpt_async(%s)\n", xpt_async_string(async_code)));
 4158         bus = path->bus;
 4159 
 4160         if (async_code == AC_BUS_RESET) {
 4161                 /* Update our notion of when the last reset occurred */
 4162                 microtime(&bus->last_reset);
 4163         }
 4164 
 4165         xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
 4166 
 4167         /*
 4168          * If this wasn't a fully wildcarded async, tell all
 4169          * clients that want all async events.
 4170          */
 4171         if (bus != xpt_periph->path->bus) {
 4172                 xpt_path_lock(xpt_periph->path);
 4173                 xpt_async_process_dev(xpt_periph->path->device, ccb);
 4174                 xpt_path_unlock(xpt_periph->path);
 4175         }
 4176 
 4177         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
 4178                 xpt_release_devq(path, 1, TRUE);
 4179         else
 4180                 xpt_release_simq(path->bus->sim, TRUE);
 4181         if (ccb->casync.async_arg_size > 0)
 4182                 free(async_arg, M_CAMXPT);
 4183         xpt_free_path(path);
 4184         xpt_free_ccb(ccb);
 4185 }
 4186 
 4187 static void
 4188 xpt_async_bcast(struct async_list *async_head,
 4189                 u_int32_t async_code,
 4190                 struct cam_path *path, void *async_arg)
 4191 {
 4192         struct async_node *cur_entry;
 4193         int lock;
 4194 
 4195         cur_entry = SLIST_FIRST(async_head);
 4196         while (cur_entry != NULL) {
 4197                 struct async_node *next_entry;
 4198                 /*
 4199                  * Grab the next list entry before we call the current
 4200                  * entry's callback.  This is because the callback function
 4201                  * can delete its async callback entry.
 4202                  */
 4203                 next_entry = SLIST_NEXT(cur_entry, links);
 4204                 if ((cur_entry->event_enable & async_code) != 0) {
 4205                         lock = cur_entry->event_lock;
 4206                         if (lock)
 4207                                 CAM_SIM_LOCK(path->device->sim);
 4208                         cur_entry->callback(cur_entry->callback_arg,
 4209                                             async_code, path,
 4210                                             async_arg);
 4211                         if (lock)
 4212                                 CAM_SIM_UNLOCK(path->device->sim);
 4213                 }
 4214                 cur_entry = next_entry;
 4215         }
 4216 }
 4217 
 4218 void
 4219 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 4220 {
 4221         union ccb *ccb;
 4222         int size;
 4223 
 4224         ccb = xpt_alloc_ccb_nowait();
 4225         if (ccb == NULL) {
 4226                 xpt_print(path, "Can't allocate CCB to send %s\n",
 4227                     xpt_async_string(async_code));
 4228                 return;
 4229         }
 4230 
 4231         if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
 4232                 xpt_print(path, "Can't allocate path to send %s\n",
 4233                     xpt_async_string(async_code));
 4234                 xpt_free_ccb(ccb);
 4235                 return;
 4236         }
 4237         ccb->ccb_h.path->periph = NULL;
 4238         ccb->ccb_h.func_code = XPT_ASYNC;
 4239         ccb->ccb_h.cbfcnp = xpt_async_process;
 4240         ccb->ccb_h.flags |= CAM_UNLOCKED;
 4241         ccb->casync.async_code = async_code;
 4242         ccb->casync.async_arg_size = 0;
 4243         size = xpt_async_size(async_code);
 4244         if (size > 0 && async_arg != NULL) {
 4245                 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
 4246                 if (ccb->casync.async_arg_ptr == NULL) {
 4247                         xpt_print(path, "Can't allocate argument to send %s\n",
 4248                             xpt_async_string(async_code));
 4249                         xpt_free_path(ccb->ccb_h.path);
 4250                         xpt_free_ccb(ccb);
 4251                         return;
 4252                 }
 4253                 memcpy(ccb->casync.async_arg_ptr, async_arg, size);
 4254                 ccb->casync.async_arg_size = size;
 4255         } else if (size < 0)
 4256                 ccb->casync.async_arg_size = size;
 4257         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
 4258                 xpt_freeze_devq(path, 1);
 4259         else
 4260                 xpt_freeze_simq(path->bus->sim, 1);
 4261         xpt_done(ccb);
 4262 }
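
/*
 * Illustrative sketch, not part of the original file: callers report an
 * event by invoking xpt_async() on a path that scopes the event.  After
 * detecting a bus reset, a SIM might do the following, where "sc" and
 * its wildcard bus path "sc->path" are hypothetical:
 *
 *        xpt_async(AC_BUS_RESET, sc->path, NULL);
 *
 * When an event carries an argument, it must point to storage of the
 * size reported by xpt_async_size() for that code; xpt_async() copies
 * the argument before queueing, so the caller's copy may be transient.
 */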
 4263 
 4264 static void
 4265 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
 4266                       struct cam_et *target, struct cam_ed *device,
 4267                       void *async_arg)
 4268 {
 4269 
 4270         /*
 4271          * We only need to handle events for real devices.
 4272          */
 4273         if (target->target_id == CAM_TARGET_WILDCARD
 4274          || device->lun_id == CAM_LUN_WILDCARD)
 4275                 return;
 4276 
 4277         printf("%s called\n", __func__);
 4278 }
 4279 
 4280 static uint32_t
 4281 xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
 4282 {
 4283         struct cam_devq *devq;
 4284         uint32_t freeze;
 4285 
 4286         devq = dev->sim->devq;
 4287         mtx_assert(&devq->send_mtx, MA_OWNED);
 4288         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
 4289             ("xpt_freeze_devq_device(%d) %u->%u\n", count,
 4290             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
 4291         freeze = (dev->ccbq.queue.qfrozen_cnt += count);
 4292         /* Remove frozen device from sendq. */
 4293         if (device_is_queued(dev))
 4294                 camq_remove(&devq->send_queue, dev->devq_entry.index);
 4295         return (freeze);
 4296 }
 4297 
 4298 u_int32_t
 4299 xpt_freeze_devq(struct cam_path *path, u_int count)
 4300 {
 4301         struct cam_ed   *dev = path->device;
 4302         struct cam_devq *devq;
 4303         uint32_t         freeze;
 4304 
 4305         devq = dev->sim->devq;
 4306         mtx_lock(&devq->send_mtx);
 4307         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
 4308         freeze = xpt_freeze_devq_device(dev, count);
 4309         mtx_unlock(&devq->send_mtx);
 4310         return (freeze);
 4311 }
 4312 
 4313 u_int32_t
 4314 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4315 {
 4316         struct cam_devq *devq;
 4317         uint32_t         freeze;
 4318 
 4319         devq = sim->devq;
 4320         mtx_lock(&devq->send_mtx);
 4321         freeze = (devq->send_queue.qfrozen_cnt += count);
 4322         mtx_unlock(&devq->send_mtx);
 4323         return (freeze);
 4324 }
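
/*
 * Illustrative sketch, not part of the original file: freezes are
 * counted and must be balanced by releases.  A common SIM error-path
 * pattern is to freeze the device queue, flag the CCB so its owner
 * knows a release is owed, and complete it:
 *
 *        xpt_freeze_devq(ccb->ccb_h.path, 1);
 *        ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
 *        xpt_done(ccb);
 *
 * The frozen count is later dropped by an explicit
 * xpt_release_devq(path, 1, TRUE), or automatically in
 * xpt_done_process() below when the CCB was submitted with
 * CAM_DEV_QFRZDIS set.
 */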
 4325 
 4326 static void
 4327 xpt_release_devq_timeout(void *arg)
 4328 {
 4329         struct cam_ed *dev;
 4330         struct cam_devq *devq;
 4331 
 4332         dev = (struct cam_ed *)arg;
 4333         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
 4334         devq = dev->sim->devq;
 4335         mtx_assert(&devq->send_mtx, MA_OWNED);
 4336         if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
 4337                 xpt_run_devq(devq);
 4338 }
 4339 
 4340 void
 4341 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4342 {
 4343         struct cam_ed *dev;
 4344         struct cam_devq *devq;
 4345 
 4346         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
 4347             count, run_queue));
 4348         dev = path->device;
 4349         devq = dev->sim->devq;
 4350         mtx_lock(&devq->send_mtx);
 4351         if (xpt_release_devq_device(dev, count, run_queue))
 4352                 xpt_run_devq(dev->sim->devq);
 4353         mtx_unlock(&devq->send_mtx);
 4354 }
 4355 
 4356 static int
 4357 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
 4358 {
 4359 
 4360         mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
 4361         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
 4362             ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
 4363             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
 4364         if (count > dev->ccbq.queue.qfrozen_cnt) {
 4365 #ifdef INVARIANTS
 4366                 printf("xpt_release_devq(): requested %u > present %u\n",
 4367                     count, dev->ccbq.queue.qfrozen_cnt);
 4368 #endif
 4369                 count = dev->ccbq.queue.qfrozen_cnt;
 4370         }
 4371         dev->ccbq.queue.qfrozen_cnt -= count;
 4372         if (dev->ccbq.queue.qfrozen_cnt == 0) {
 4373                 /*
 4374                  * No longer need to wait for a successful
 4375                  * command completion.
 4376                  */
 4377                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4378                 /*
 4379                  * Remove any timeouts that might be scheduled
 4380                  * to release this queue.
 4381                  */
 4382                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4383                         callout_stop(&dev->callout);
 4384                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4385                 }
 4386                 /*
 4387                  * Now that we are unfrozen, schedule the
 4388                  * device so any pending transactions are
 4389                  * run.
 4390                  */
 4391                 xpt_schedule_devq(dev->sim->devq, dev);
 4392         } else
 4393                 run_queue = 0;
 4394         return (run_queue);
 4395 }
 4396 
 4397 void
 4398 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4399 {
 4400         struct cam_devq *devq;
 4401 
 4402         devq = sim->devq;
 4403         mtx_lock(&devq->send_mtx);
 4404         if (devq->send_queue.qfrozen_cnt <= 0) {
 4405 #ifdef INVARIANTS
 4406                 printf("xpt_release_simq: requested 1 > present %u\n",
 4407                     devq->send_queue.qfrozen_cnt);
 4408 #endif
 4409         } else
 4410                 devq->send_queue.qfrozen_cnt--;
 4411         if (devq->send_queue.qfrozen_cnt == 0) {
 4412                 /*
 4413                  * If there is a timeout scheduled to release this
 4414                  * sim queue, remove it.  The queue frozen count is
 4415                  * already at 0.
 4416                  */
 4417                 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4418                         callout_stop(&sim->callout);
 4419                         sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4420                 }
 4421                 if (run_queue) {
 4422                         /*
 4423                          * Now that we are unfrozen, run the send queue.
 4424                          */
 4425                         xpt_run_devq(sim->devq);
 4426                 }
 4427         }
 4428         mtx_unlock(&devq->send_mtx);
 4429 }
 4430 
 4431 /*
 4432  * XXX Appears to be unused.
 4433  */
 4434 static void
 4435 xpt_release_simq_timeout(void *arg)
 4436 {
 4437         struct cam_sim *sim;
 4438 
 4439         sim = (struct cam_sim *)arg;
 4440         xpt_release_simq(sim, /* run_queue */ TRUE);
 4441 }
 4442 
 4443 void
 4444 xpt_done(union ccb *done_ccb)
 4445 {
 4446         struct cam_doneq *queue;
 4447         int     run, hash;
 4448 
 4449         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4450         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
 4451                 return;
 4452 
 4453         hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
 4454             done_ccb->ccb_h.target_lun) % cam_num_doneqs;
 4455         queue = &cam_doneqs[hash];
 4456         mtx_lock(&queue->cam_doneq_mtx);
 4457         run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
 4458         STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
 4459         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4460         mtx_unlock(&queue->cam_doneq_mtx);
 4461         if (run)
 4462                 wakeup(&queue->cam_doneq);
 4463 }
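
/*
 * Illustrative sketch, not part of the original file: a SIM completes a
 * CCB by filling in its status and handing it back to the XPT; the
 * peripheral's callback then runs later in a completion thread (see
 * xpt_done_td() below):
 *
 *        ccb->ccb_h.status = CAM_REQ_CMP;
 *        xpt_done(ccb);
 *
 * A SIM already running in a context where the callback may safely be
 * invoked can call xpt_done_direct() instead, bypassing the queues.
 */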
 4464 
 4465 void
 4466 xpt_done_direct(union ccb *done_ccb)
 4467 {
 4468 
 4469         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct\n"));
 4470         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
 4471                 return;
 4472 
 4473         xpt_done_process(&done_ccb->ccb_h);
 4474 }
 4475 
 4476 union ccb *
 4477 xpt_alloc_ccb(void)
 4478 {
 4479         union ccb *new_ccb;
 4480 
 4481         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
 4482         return (new_ccb);
 4483 }
 4484 
 4485 union ccb *
 4486 xpt_alloc_ccb_nowait(void)
 4487 {
 4488         union ccb *new_ccb;
 4489 
 4490         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
 4491         return (new_ccb);
 4492 }
 4493 
 4494 void
 4495 xpt_free_ccb(union ccb *free_ccb)
 4496 {
 4497         free(free_ccb, M_CAMCCB);
 4498 }
 4499 
 4500 
 4501 
 4502 /* Private XPT functions */
 4503 
 4504 /*
 4505  * Get a CAM control block for the caller. Charge the structure to the device
 4506  * referenced by the path.  If we don't have sufficient resources to allocate
 4507  * more ccbs, we return NULL.
 4508  */
 4509 static union ccb *
 4510 xpt_get_ccb_nowait(struct cam_periph *periph)
 4511 {
 4512         union ccb *new_ccb;
 4513 
 4514         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_NOWAIT);
 4515         if (new_ccb == NULL)
 4516                 return (NULL);
 4517         periph->periph_allocated++;
 4518         cam_ccbq_take_opening(&periph->path->device->ccbq);
 4519         return (new_ccb);
 4520 }
 4521 
 4522 static union ccb *
 4523 xpt_get_ccb(struct cam_periph *periph)
 4524 {
 4525         union ccb *new_ccb;
 4526 
 4527         cam_periph_unlock(periph);
 4528         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_WAITOK);
 4529         cam_periph_lock(periph);
 4530         periph->periph_allocated++;
 4531         cam_ccbq_take_opening(&periph->path->device->ccbq);
 4532         return (new_ccb);
 4533 }
 4534 
 4535 union ccb *
 4536 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
 4537 {
 4538         struct ccb_hdr *ccb_h;
 4539 
 4540         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
 4541         cam_periph_assert(periph, MA_OWNED);
 4542         while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
 4543             ccb_h->pinfo.priority != priority) {
 4544                 if (priority < periph->immediate_priority) {
 4545                         periph->immediate_priority = priority;
 4546                         xpt_run_allocq(periph, 0);
 4547                 } else
 4548                         cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
 4549                             "cgticb", 0);
 4550         }
 4551         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
 4552         return ((union ccb *)ccb_h);
 4553 }
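
/*
 * Illustrative sketch, not part of the original file: a peripheral
 * driver normally receives CCBs through its start routine via
 * xpt_run_allocq(), but it may also request one synchronously while
 * holding the periph lock, as cam_periph_getccb() asserts:
 *
 *        union ccb *ccb;
 *
 *        cam_periph_lock(periph);
 *        ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *        ... fill in the CCB and dispatch it via xpt_action() ...
 *        cam_periph_unlock(periph);
 */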
 4554 
 4555 static void
 4556 xpt_acquire_bus(struct cam_eb *bus)
 4557 {
 4558 
 4559         xpt_lock_buses();
 4560         bus->refcount++;
 4561         xpt_unlock_buses();
 4562 }
 4563 
 4564 static void
 4565 xpt_release_bus(struct cam_eb *bus)
 4566 {
 4567 
 4568         xpt_lock_buses();
 4569         KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
 4570         if (--bus->refcount > 0) {
 4571                 xpt_unlock_buses();
 4572                 return;
 4573         }
 4574         TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
 4575         xsoftc.bus_generation++;
 4576         xpt_unlock_buses();
 4577         KASSERT(TAILQ_EMPTY(&bus->et_entries),
 4578             ("destroying bus, but target list is not empty"));
 4579         cam_sim_release(bus->sim);
 4580         mtx_destroy(&bus->eb_mtx);
 4581         free(bus, M_CAMXPT);
 4582 }
 4583 
 4584 static struct cam_et *
 4585 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4586 {
 4587         struct cam_et *cur_target, *target;
 4588 
 4589         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
 4590         mtx_assert(&bus->eb_mtx, MA_OWNED);
 4591         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
 4592                                          M_NOWAIT|M_ZERO);
 4593         if (target == NULL)
 4594                 return (NULL);
 4595 
 4596         TAILQ_INIT(&target->ed_entries);
 4597         target->bus = bus;
 4598         target->target_id = target_id;
 4599         target->refcount = 1;
 4600         target->generation = 0;
 4601         target->luns = NULL;
 4602         mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
 4603         timevalclear(&target->last_reset);
 4604         /*
 4605          * Hold a reference to our parent bus so it
 4606          * will not go away before we do.
 4607          */
 4608         bus->refcount++;
 4609 
 4610         /* Insertion sort into our bus's target list */
 4611         cur_target = TAILQ_FIRST(&bus->et_entries);
 4612         while (cur_target != NULL && cur_target->target_id < target_id)
 4613                 cur_target = TAILQ_NEXT(cur_target, links);
 4614         if (cur_target != NULL) {
 4615                 TAILQ_INSERT_BEFORE(cur_target, target, links);
 4616         } else {
 4617                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4618         }
 4619         bus->generation++;
 4620         return (target);
 4621 }
 4622 
 4623 static void
 4624 xpt_acquire_target(struct cam_et *target)
 4625 {
 4626         struct cam_eb *bus = target->bus;
 4627 
 4628         mtx_lock(&bus->eb_mtx);
 4629         target->refcount++;
 4630         mtx_unlock(&bus->eb_mtx);
 4631 }
 4632 
 4633 static void
 4634 xpt_release_target(struct cam_et *target)
 4635 {
 4636         struct cam_eb *bus = target->bus;
 4637 
 4638         mtx_lock(&bus->eb_mtx);
 4639         if (--target->refcount > 0) {
 4640                 mtx_unlock(&bus->eb_mtx);
 4641                 return;
 4642         }
 4643         TAILQ_REMOVE(&bus->et_entries, target, links);
 4644         bus->generation++;
 4645         mtx_unlock(&bus->eb_mtx);
 4646         KASSERT(TAILQ_EMPTY(&target->ed_entries),
 4647             ("destroying target, but device list is not empty"));
 4648         xpt_release_bus(bus);
 4649         mtx_destroy(&target->luns_mtx);
 4650         if (target->luns)
 4651                 free(target->luns, M_CAMXPT);
 4652         free(target, M_CAMXPT);
 4653 }
 4654 
 4655 static struct cam_ed *
 4656 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
 4657                          lun_id_t lun_id)
 4658 {
 4659         struct cam_ed *device;
 4660 
 4661         device = xpt_alloc_device(bus, target, lun_id);
 4662         if (device == NULL)
 4663                 return (NULL);
 4664 
 4665         device->mintags = 1;
 4666         device->maxtags = 1;
 4667         return (device);
 4668 }
 4669 
 4670 static void
 4671 xpt_destroy_device(void *context, int pending)
 4672 {
 4673         struct cam_ed   *device = context;
 4674 
 4675         mtx_lock(&device->device_mtx);
 4676         mtx_destroy(&device->device_mtx);
 4677         free(device, M_CAMDEV);
 4678 }
 4679 
 4680 struct cam_ed *
 4681 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4682 {
 4683         struct cam_ed   *cur_device, *device;
 4684         struct cam_devq *devq;
 4685         cam_status status;
 4686 
 4687         mtx_assert(&bus->eb_mtx, MA_OWNED);
 4688         /* Make space for us in the device queue on our bus */
 4689         devq = bus->sim->devq;
 4690         mtx_lock(&devq->send_mtx);
 4691         status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
 4692         mtx_unlock(&devq->send_mtx);
 4693         if (status != CAM_REQ_CMP)
 4694                 return (NULL);
 4695 
 4696         device = (struct cam_ed *)malloc(sizeof(*device),
 4697                                          M_CAMDEV, M_NOWAIT|M_ZERO);
 4698         if (device == NULL)
 4699                 return (NULL);
 4700 
 4701         cam_init_pinfo(&device->devq_entry);
 4702         device->target = target;
 4703         device->lun_id = lun_id;
 4704         device->sim = bus->sim;
 4705         if (cam_ccbq_init(&device->ccbq,
 4706                           bus->sim->max_dev_openings) != 0) {
 4707                 free(device, M_CAMDEV);
 4708                 return (NULL);
 4709         }
 4710         SLIST_INIT(&device->asyncs);
 4711         SLIST_INIT(&device->periphs);
 4712         device->generation = 0;
 4713         device->flags = CAM_DEV_UNCONFIGURED;
 4714         device->tag_delay_count = 0;
 4715         device->tag_saved_openings = 0;
 4716         device->refcount = 1;
 4717         mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
 4718         callout_init_mtx(&device->callout, &devq->send_mtx, 0);
 4719         TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
 4720         /*
 4721          * Hold a reference to our parent bus so it
 4722          * will not go away before we do.
 4723          */
 4724         target->refcount++;
 4725 
 4726         cur_device = TAILQ_FIRST(&target->ed_entries);
 4727         while (cur_device != NULL && cur_device->lun_id < lun_id)
 4728                 cur_device = TAILQ_NEXT(cur_device, links);
 4729         if (cur_device != NULL)
 4730                 TAILQ_INSERT_BEFORE(cur_device, device, links);
 4731         else
 4732                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 4733         target->generation++;
 4734         return (device);
 4735 }
 4736 
 4737 void
 4738 xpt_acquire_device(struct cam_ed *device)
 4739 {
 4740         struct cam_eb *bus = device->target->bus;
 4741 
 4742         mtx_lock(&bus->eb_mtx);
 4743         device->refcount++;
 4744         mtx_unlock(&bus->eb_mtx);
 4745 }
 4746 
 4747 void
 4748 xpt_release_device(struct cam_ed *device)
 4749 {
 4750         struct cam_eb *bus = device->target->bus;
 4751         struct cam_devq *devq;
 4752 
 4753         mtx_lock(&bus->eb_mtx);
 4754         if (--device->refcount > 0) {
 4755                 mtx_unlock(&bus->eb_mtx);
 4756                 return;
 4757         }
 4758 
 4759         TAILQ_REMOVE(&device->target->ed_entries, device, links);
 4760         device->target->generation++;
 4761         mtx_unlock(&bus->eb_mtx);
 4762 
 4763         /* Release our slot in the devq */
 4764         devq = bus->sim->devq;
 4765         mtx_lock(&devq->send_mtx);
 4766         cam_devq_resize(devq, devq->send_queue.array_size - 1);
 4767         mtx_unlock(&devq->send_mtx);
 4768 
 4769         KASSERT(SLIST_EMPTY(&device->periphs),
 4770             ("destroying device, but periphs list is not empty"));
 4771         KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
 4772             ("destroying device while still queued for ccbs"));
 4773 
 4774         if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 4775                 callout_stop(&device->callout);
 4776 
 4777         xpt_release_target(device->target);
 4778 
 4779         cam_ccbq_fini(&device->ccbq);
 4780         /*
 4781          * Free allocated memory.  free(9) does nothing if the
 4782          * supplied pointer is NULL, so it is safe to call without
 4783          * checking.
 4784          */
 4785         free(device->supported_vpds, M_CAMXPT);
 4786         free(device->device_id, M_CAMXPT);
 4787         free(device->physpath, M_CAMXPT);
 4788         free(device->rcap_buf, M_CAMXPT);
 4789         free(device->serial_num, M_CAMXPT);
 4790         taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
 4791 }
 4792 
 4793 u_int32_t
 4794 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 4795 {
 4796         int     result;
 4797         struct  cam_ed *dev;
 4798 
 4799         dev = path->device;
 4800         mtx_lock(&dev->sim->devq->send_mtx);
 4801         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 4802         mtx_unlock(&dev->sim->devq->send_mtx);
 4803         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4804          || (dev->inq_flags & SID_CmdQue) != 0)
 4805                 dev->tag_saved_openings = newopenings;
 4806         return (result);
 4807 }
 4808 
 4809 static struct cam_eb *
 4810 xpt_find_bus(path_id_t path_id)
 4811 {
 4812         struct cam_eb *bus;
 4813 
 4814         xpt_lock_buses();
 4815         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4816              bus != NULL;
 4817              bus = TAILQ_NEXT(bus, links)) {
 4818                 if (bus->path_id == path_id) {
 4819                         bus->refcount++;
 4820                         break;
 4821                 }
 4822         }
 4823         xpt_unlock_buses();
 4824         return (bus);
 4825 }
 4826 
 4827 static struct cam_et *
 4828 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 4829 {
 4830         struct cam_et *target;
 4831 
 4832         mtx_assert(&bus->eb_mtx, MA_OWNED);
 4833         for (target = TAILQ_FIRST(&bus->et_entries);
 4834              target != NULL;
 4835              target = TAILQ_NEXT(target, links)) {
 4836                 if (target->target_id == target_id) {
 4837                         target->refcount++;
 4838                         break;
 4839                 }
 4840         }
 4841         return (target);
 4842 }
 4843 
 4844 static struct cam_ed *
 4845 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 4846 {
 4847         struct cam_ed *device;
 4848 
 4849         mtx_assert(&target->bus->eb_mtx, MA_OWNED);
 4850         for (device = TAILQ_FIRST(&target->ed_entries);
 4851              device != NULL;
 4852              device = TAILQ_NEXT(device, links)) {
 4853                 if (device->lun_id == lun_id) {
 4854                         device->refcount++;
 4855                         break;
 4856                 }
 4857         }
 4858         return (device);
 4859 }
 4860 
 4861 void
 4862 xpt_start_tags(struct cam_path *path)
 4863 {
 4864         struct ccb_relsim crs;
 4865         struct cam_ed *device;
 4866         struct cam_sim *sim;
 4867         int    newopenings;
 4868 
 4869         device = path->device;
 4870         sim = path->bus->sim;
 4871         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4872         xpt_freeze_devq(path, /*count*/1);
 4873         device->inq_flags |= SID_CmdQue;
 4874         if (device->tag_saved_openings != 0)
 4875                 newopenings = device->tag_saved_openings;
 4876         else
 4877                 newopenings = min(device->maxtags,
 4878                                   sim->max_tagged_dev_openings);
 4879         xpt_dev_ccbq_resize(path, newopenings);
 4880         xpt_async(AC_GETDEV_CHANGED, path, NULL);
 4881         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4882         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4883         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4884         crs.openings
 4885             = crs.release_timeout
 4886             = crs.qfrozen_cnt
 4887             = 0;
 4888         xpt_action((union ccb *)&crs);
 4889 }
 4890 
 4891 void
 4892 xpt_stop_tags(struct cam_path *path)
 4893 {
 4894         struct ccb_relsim crs;
 4895         struct cam_ed *device;
 4896         struct cam_sim *sim;
 4897 
 4898         device = path->device;
 4899         sim = path->bus->sim;
 4900         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4901         device->tag_delay_count = 0;
 4902         xpt_freeze_devq(path, /*count*/1);
 4903         device->inq_flags &= ~SID_CmdQue;
 4904         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
 4905         xpt_async(AC_GETDEV_CHANGED, path, NULL);
 4906         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4907         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4908         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4909         crs.openings
 4910             = crs.release_timeout
 4911             = crs.qfrozen_cnt
 4912             = 0;
 4913         xpt_action((union ccb *)&crs);
 4914 }
 4915 
 4916 static void
 4917 xpt_boot_delay(void *arg)
 4918 {
 4919 
 4920         xpt_release_boot();
 4921 }
 4922 
 4923 static void
 4924 xpt_config(void *arg)
 4925 {
 4926         /*
 4927          * Now that interrupts are enabled, go find our devices
 4928          */
 4929         if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
 4930                 printf("xpt_config: failed to create taskqueue thread.\n");
 4931 
 4932         /* Setup debugging path */
 4933         if (cam_dflags != CAM_DEBUG_NONE) {
 4934                 if (xpt_create_path(&cam_dpath, NULL,
 4935                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 4936                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 4937                         printf("xpt_config: xpt_create_path() failed for debug"
 4938                                " target %d:%d:%d, debugging disabled\n",
 4939                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 4940                         cam_dflags = CAM_DEBUG_NONE;
 4941                 }
 4942         } else
 4943                 cam_dpath = NULL;
 4944 
 4945         periphdriver_init(1);
 4946         xpt_hold_boot();
 4947         callout_init(&xsoftc.boot_callout, 1);
 4948         callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
 4949             xpt_boot_delay, NULL);
 4950         /* Fire up rescan thread. */
 4951         if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
 4952             "cam", "scanner")) {
 4953                 printf("xpt_config: failed to create rescan thread.\n");
 4954         }
 4955 }
 4956 
 4957 void
 4958 xpt_hold_boot(void)
 4959 {
 4960         xpt_lock_buses();
 4961         xsoftc.buses_to_config++;
 4962         xpt_unlock_buses();
 4963 }
 4964 
 4965 void
 4966 xpt_release_boot(void)
 4967 {
 4968         xpt_lock_buses();
 4969         xsoftc.buses_to_config--;
 4970         if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
 4971                 struct  xpt_task *task;
 4972 
 4973                 xsoftc.buses_config_done = 1;
 4974                 xpt_unlock_buses();
 4975                 /* Call manually because we don't have any busses */
 4976                 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
 4977                 if (task != NULL) {
 4978                         TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
 4979                         taskqueue_enqueue(taskqueue_thread, &task->task);
 4980                 }
 4981         } else
 4982                 xpt_unlock_buses();
 4983 }
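
/*
 * Illustrative sketch, not part of the original file: code that needs
 * the boot-complete announcement delayed past some asynchronous
 * discovery brackets that work with the pair above:
 *
 *        xpt_hold_boot();
 *        ... start an asynchronous scan whose completion path ...
 *        ... eventually calls xpt_release_boot() ...
 */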
 4984 
 4985 /*
 4986  * If the given device has only one peripheral attached to it, and if that
 4987  * peripheral is the passthrough driver, announce it.  This ensures that the
 4988  * user sees some sort of announcement for every peripheral in their system.
 4989  */
 4990 static int
 4991 xptpassannouncefunc(struct cam_ed *device, void *arg)
 4992 {
 4993         struct cam_periph *periph;
 4994         int i;
 4995 
 4996         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 4997              periph = SLIST_NEXT(periph, periph_links), i++);
 4998 
 4999         periph = SLIST_FIRST(&device->periphs);
 5000         if ((i == 1)
 5001          && (strncmp(periph->periph_name, "pass", 4) == 0))
 5002                 xpt_announce_periph(periph, NULL);
 5003 
 5004         return(1);
 5005 }
 5006 
 5007 static void
 5008 xpt_finishconfig_task(void *context, int pending)
 5009 {
 5010 
 5011         periphdriver_init(2);
 5012         /*
 5013          * Check for devices with no "standard" peripheral driver
 5014          * attached.  For any devices like that, announce the
 5015          * passthrough driver so the user will see something.
 5016          */
 5017         if (!bootverbose)
 5018                 xpt_for_all_devices(xptpassannouncefunc, NULL);
 5019 
 5020         /* Release our hook so that the boot can continue. */
 5021         config_intrhook_disestablish(xsoftc.xpt_config_hook);
 5022         free(xsoftc.xpt_config_hook, M_CAMXPT);
 5023         xsoftc.xpt_config_hook = NULL;
 5024 
 5025         free(context, M_CAMXPT);
 5026 }
 5027 
 5028 cam_status
 5029 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
 5030                    struct cam_path *path)
 5031 {
 5032         struct ccb_setasync csa;
 5033         cam_status status;
 5034         int xptpath = 0;
 5035 
 5036         if (path == NULL) {
 5037                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
 5038                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 5039                 if (status != CAM_REQ_CMP)
 5040                         return (status);
 5041                 xpt_path_lock(path);
 5042                 xptpath = 1;
 5043         }
 5044 
 5045         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 5046         csa.ccb_h.func_code = XPT_SASYNC_CB;
 5047         csa.event_enable = event;
 5048         csa.callback = cbfunc;
 5049         csa.callback_arg = cbarg;
 5050         xpt_action((union ccb *)&csa);
 5051         status = csa.ccb_h.status;
 5052 
 5053         if (xptpath) {
 5054                 xpt_path_unlock(path);
 5055                 xpt_free_path(path);
 5056         }
 5057 
 5058         if ((status == CAM_REQ_CMP) &&
 5059             (csa.event_enable & AC_FOUND_DEVICE)) {
 5060                 /*
 5061                  * Get this peripheral up to date with all
 5062                  * the currently existing devices.
 5063                  */
 5064                 xpt_for_all_devices(xptsetasyncfunc, &csa);
 5065         }
 5066         if ((status == CAM_REQ_CMP) &&
 5067             (csa.event_enable & AC_PATH_REGISTERED)) {
 5068                 /*
 5069                  * Get this peripheral up to date with all
 5070                  * the currently existing busses.
 5071                  */
 5072                 xpt_for_all_busses(xptsetasyncbusfunc, &csa);
 5073         }
 5074 
 5075         return (status);
 5076 }
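
/*
 * Illustrative sketch, not part of the original file: a consumer
 * registers for async events with a callback matching ac_callback_t;
 * passing a NULL path subscribes to fully wildcarded events.  The
 * callback name "mydriver_async" is hypothetical:
 *
 *        static void
 *        mydriver_async(void *arg, u_int32_t code, struct cam_path *path,
 *            void *async_arg);
 *
 *        (void)xpt_register_async(AC_FOUND_DEVICE, mydriver_async,
 *            NULL, NULL);
 *
 * As the code above shows, an AC_FOUND_DEVICE registration is replayed
 * immediately for every device that already exists.
 */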
 5077 
 5078 static void
 5079 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 5080 {
 5081         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 5082 
 5083         switch (work_ccb->ccb_h.func_code) {
 5084         /* Common cases first */
 5085         case XPT_PATH_INQ:              /* Path routing inquiry */
 5086         {
 5087                 struct ccb_pathinq *cpi;
 5088 
 5089                 cpi = &work_ccb->cpi;
 5090                 cpi->version_num = 1; /* XXX??? */
 5091                 cpi->hba_inquiry = 0;
 5092                 cpi->target_sprt = 0;
 5093                 cpi->hba_misc = 0;
 5094                 cpi->hba_eng_cnt = 0;
 5095                 cpi->max_target = 0;
 5096                 cpi->max_lun = 0;
 5097                 cpi->initiator_id = 0;
 5098                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 5099                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
 5100                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 5101                 cpi->unit_number = sim->unit_number;
 5102                 cpi->bus_id = sim->bus_id;
 5103                 cpi->base_transfer_speed = 0;
 5104                 cpi->protocol = PROTO_UNSPECIFIED;
 5105                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
 5106                 cpi->transport = XPORT_UNSPECIFIED;
 5107                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
 5108                 cpi->ccb_h.status = CAM_REQ_CMP;
 5109                 xpt_done(work_ccb);
 5110                 break;
 5111         }
 5112         default:
 5113                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 5114                 xpt_done(work_ccb);
 5115                 break;
 5116         }
 5117 }
 5118 
 5119 /*
 5120  * The xpt as a "controller" has no interrupt sources, so polling
 5121  * is a no-op.
 5122  */
 5123 static void
 5124 xptpoll(struct cam_sim *sim)
 5125 {
 5126 }
 5127 
 5128 void
 5129 xpt_lock_buses(void)
 5130 {
 5131         mtx_lock(&xsoftc.xpt_topo_lock);
 5132 }
 5133 
 5134 void
 5135 xpt_unlock_buses(void)
 5136 {
 5137         mtx_unlock(&xsoftc.xpt_topo_lock);
 5138 }
 5139 
 5140 struct mtx *
 5141 xpt_path_mtx(struct cam_path *path)
 5142 {
 5143 
 5144         return (&path->device->device_mtx);
 5145 }
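
/*
 * Illustrative sketch, not part of the original file: xpt_path_mtx()
 * exposes the per-device mutex that serializes work on a path; it is
 * the lock that xpt_path_lock()/xpt_path_unlock() operate on.  Code
 * holding it across a sequence of operations can do:
 *
 *        struct mtx *mtx = xpt_path_mtx(path);
 *
 *        mtx_lock(mtx);
 *        ... examine or update per-device state ...
 *        mtx_unlock(mtx);
 */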
 5146 
 5147 static void
 5148 xpt_done_process(struct ccb_hdr *ccb_h)
 5149 {
 5150         struct cam_sim *sim;
 5151         struct cam_devq *devq;
 5152         struct mtx *mtx = NULL;
 5153 
 5154         if (ccb_h->flags & CAM_HIGH_POWER) {
 5155                 struct highpowerlist    *hphead;
 5156                 struct cam_ed           *device;
 5157 
 5158                 mtx_lock(&xsoftc.xpt_highpower_lock);
 5159                 hphead = &xsoftc.highpowerq;
 5160 
 5161                 device = STAILQ_FIRST(hphead);
 5162 
 5163                 /*
 5164                  * Increment the count since this command is done.
 5165                  */
 5166                 xsoftc.num_highpower++;
 5167 
 5168                 /*
 5169                  * Any high powered commands queued up?
 5170                  */
 5171                 if (device != NULL) {
 5172 
 5173                         STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
 5174                         mtx_unlock(&xsoftc.xpt_highpower_lock);
 5175 
 5176                         mtx_lock(&device->sim->devq->send_mtx);
 5177                         xpt_release_devq_device(device,
 5178                                          /*count*/1, /*runqueue*/TRUE);
 5179                         mtx_unlock(&device->sim->devq->send_mtx);
 5180                 } else
 5181                         mtx_unlock(&xsoftc.xpt_highpower_lock);
 5182         }
 5183 
 5184         sim = ccb_h->path->bus->sim;
 5185 
 5186         if (ccb_h->status & CAM_RELEASE_SIMQ) {
 5187                 xpt_release_simq(sim, /*run_queue*/FALSE);
 5188                 ccb_h->status &= ~CAM_RELEASE_SIMQ;
 5189         }
 5190 
 5191         if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 5192          && (ccb_h->status & CAM_DEV_QFRZN)) {
 5193                 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
 5194                 ccb_h->status &= ~CAM_DEV_QFRZN;
 5195         }
 5196 
 5197         devq = sim->devq;
 5198         if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 5199                 struct cam_ed *dev = ccb_h->path->device;
 5200 
 5201                 mtx_lock(&devq->send_mtx);
 5202                 devq->send_active--;
 5203                 devq->send_openings++;
 5204                 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 5205 
 5206                 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 5207                   && (dev->ccbq.dev_active == 0))) {
 5208                         dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
 5209                         xpt_release_devq_device(dev, /*count*/1,
 5210                                          /*run_queue*/FALSE);
 5211                 }
 5212 
 5213                 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 5214                   && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
 5215                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 5216                         xpt_release_devq_device(dev, /*count*/1,
 5217                                          /*run_queue*/FALSE);
 5218                 }
 5219 
 5220                 if (!device_is_queued(dev))
 5221                         (void)xpt_schedule_devq(devq, dev);
 5222                 xpt_run_devq(devq);
 5223                 mtx_unlock(&devq->send_mtx);
 5224 
 5225                 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
 5226                         mtx = xpt_path_mtx(ccb_h->path);
 5227                         mtx_lock(mtx);
 5228 
 5229                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 5230                          && (--dev->tag_delay_count == 0))
 5231                                 xpt_start_tags(ccb_h->path);
 5232                 }
 5233         }
 5234 
 5235         if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
 5236                 if (mtx == NULL) {
 5237                         mtx = xpt_path_mtx(ccb_h->path);
 5238                         mtx_lock(mtx);
 5239                 }
 5240         } else {
 5241                 if (mtx != NULL) {
 5242                         mtx_unlock(mtx);
 5243                         mtx = NULL;
 5244                 }
 5245         }
 5246 
 5247         /* Call the peripheral driver's callback */
 5248         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 5249         (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 5250         if (mtx != NULL)
 5251                 mtx_unlock(mtx);
 5252 }
 5253 
 5254 void
 5255 xpt_done_td(void *arg)
 5256 {
 5257         struct cam_doneq *queue = arg;
 5258         struct ccb_hdr *ccb_h;
 5259         STAILQ_HEAD(, ccb_hdr)  doneq;
 5260 
 5261         STAILQ_INIT(&doneq);
 5262         mtx_lock(&queue->cam_doneq_mtx);
 5263         while (1) {
 5264                 while (STAILQ_EMPTY(&queue->cam_doneq)) {
 5265                         queue->cam_doneq_sleep = 1;
 5266                         msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
 5267                             PRIBIO, "-", 0);
 5268                         queue->cam_doneq_sleep = 0;
 5269                 }
 5270                 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
 5271                 mtx_unlock(&queue->cam_doneq_mtx);
 5272 
 5273                 THREAD_NO_SLEEPING();
 5274                 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
 5275                         STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
 5276                         xpt_done_process(ccb_h);
 5277                 }
 5278                 THREAD_SLEEPING_OK();
 5279 
 5280                 mtx_lock(&queue->cam_doneq_mtx);
 5281         }
 5282 }
 5283 
 5284 static void
 5285 camisr_runqueue(void)
 5286 {
 5287         struct  ccb_hdr *ccb_h;
 5288         struct cam_doneq *queue;
 5289         int i;
 5290 
 5291         /* Process global queues. */
 5292         for (i = 0; i < cam_num_doneqs; i++) {
 5293                 queue = &cam_doneqs[i];
 5294                 mtx_lock(&queue->cam_doneq_mtx);
 5295                 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
 5296                         STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
 5297                         mtx_unlock(&queue->cam_doneq_mtx);
 5298                         xpt_done_process(ccb_h);
 5299                         mtx_lock(&queue->cam_doneq_mtx);
 5300                 }
 5301                 mtx_unlock(&queue->cam_doneq_mtx);
 5302         }
 5303 }
