The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Implementation of the Common Access Method Transport (XPT) layer.
    3  *
    4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
    5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/9.2/sys/cam/cam_xpt.c 253037 2013-07-08 15:54:38Z mav $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/systm.h>
   36 #include <sys/types.h>
   37 #include <sys/malloc.h>
   38 #include <sys/kernel.h>
   39 #include <sys/time.h>
   40 #include <sys/conf.h>
   41 #include <sys/fcntl.h>
   42 #include <sys/interrupt.h>
   43 #include <sys/sbuf.h>
   44 #include <sys/taskqueue.h>
   45 
   46 #include <sys/lock.h>
   47 #include <sys/mutex.h>
   48 #include <sys/sysctl.h>
   49 #include <sys/kthread.h>
   50 
   51 #include <cam/cam.h>
   52 #include <cam/cam_ccb.h>
   53 #include <cam/cam_periph.h>
   54 #include <cam/cam_queue.h>
   55 #include <cam/cam_sim.h>
   56 #include <cam/cam_xpt.h>
   57 #include <cam/cam_xpt_sim.h>
   58 #include <cam/cam_xpt_periph.h>
   59 #include <cam/cam_xpt_internal.h>
   60 #include <cam/cam_debug.h>
   61 #include <cam/cam_compat.h>
   62 
   63 #include <cam/scsi/scsi_all.h>
   64 #include <cam/scsi/scsi_message.h>
   65 #include <cam/scsi/scsi_pass.h>
   66 
   67 #include <machine/md_var.h>     /* geometry translation */
   68 #include <machine/stdarg.h>     /* for xpt_print below */
   69 
   70 #include "opt_cam.h"
   71 
   72 /*
   73  * This is the maximum number of high powered commands (e.g. start unit)
   74  * that can be outstanding at a particular time.
   75  */
   76 #ifndef CAM_MAX_HIGHPOWER
   77 #define CAM_MAX_HIGHPOWER  4
   78 #endif
   79 
   80 /* Datastructures internal to the xpt layer */
   81 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
   82 MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
   83 MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
   84 MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");
   85 
   86 /* Object for defering XPT actions to a taskqueue */
   87 struct xpt_task {
   88         struct task     task;
   89         void            *data1;
   90         uintptr_t       data2;
   91 };
   92 
   93 typedef enum {
   94         XPT_FLAG_OPEN           = 0x01
   95 } xpt_flags;
   96 
   97 struct xpt_softc {
   98         xpt_flags               flags;
   99         u_int32_t               xpt_generation;
  100 
  101         /* number of high powered commands that can go through right now */
  102         STAILQ_HEAD(highpowerlist, ccb_hdr)     highpowerq;
  103         int                     num_highpower;
  104 
  105         /* queue for handling async rescan requests. */
  106         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
  107         int buses_to_config;
  108         int buses_config_done;
  109 
  110         /* Registered busses */
  111         TAILQ_HEAD(,cam_eb)     xpt_busses;
  112         u_int                   bus_generation;
  113 
  114         struct intr_config_hook *xpt_config_hook;
  115 
  116         int                     boot_delay;
  117         struct callout          boot_callout;
  118 
  119         struct mtx              xpt_topo_lock;
  120         struct mtx              xpt_lock;
  121 };
  122 
  123 typedef enum {
  124         DM_RET_COPY             = 0x01,
  125         DM_RET_FLAG_MASK        = 0x0f,
  126         DM_RET_NONE             = 0x00,
  127         DM_RET_STOP             = 0x10,
  128         DM_RET_DESCEND          = 0x20,
  129         DM_RET_ERROR            = 0x30,
  130         DM_RET_ACTION_MASK      = 0xf0
  131 } dev_match_ret;
  132 
  133 typedef enum {
  134         XPT_DEPTH_BUS,
  135         XPT_DEPTH_TARGET,
  136         XPT_DEPTH_DEVICE,
  137         XPT_DEPTH_PERIPH
  138 } xpt_traverse_depth;
  139 
  140 struct xpt_traverse_config {
  141         xpt_traverse_depth      depth;
  142         void                    *tr_func;
  143         void                    *tr_arg;
  144 };
  145 
  146 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
  147 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
  148 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
  149 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
  150 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
  151 
  152 /* Transport layer configuration information */
  153 static struct xpt_softc xsoftc;
  154 
  155 TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
  156 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
  157            &xsoftc.boot_delay, 0, "Bus registration wait time");
  158 
  159 /* Queues for our software interrupt handler */
  160 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
  161 typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
  162 static cam_simq_t cam_simq;
  163 static struct mtx cam_simq_lock;
  164 
  165 /* Pointers to software interrupt handlers */
  166 static void *cambio_ih;
  167 
  168 struct cam_periph *xpt_periph;
  169 
  170 static periph_init_t xpt_periph_init;
  171 
  172 static struct periph_driver xpt_driver =
  173 {
  174         xpt_periph_init, "xpt",
  175         TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
  176         CAM_PERIPH_DRV_EARLY
  177 };
  178 
  179 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
  180 
  181 static d_open_t xptopen;
  182 static d_close_t xptclose;
  183 static d_ioctl_t xptioctl;
  184 static d_ioctl_t xptdoioctl;
  185 
  186 static struct cdevsw xpt_cdevsw = {
  187         .d_version =    D_VERSION,
  188         .d_flags =      0,
  189         .d_open =       xptopen,
  190         .d_close =      xptclose,
  191         .d_ioctl =      xptioctl,
  192         .d_name =       "xpt",
  193 };
  194 
  195 /* Storage for debugging datastructures */
  196 struct cam_path *cam_dpath;
  197 u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
  198 TUNABLE_INT("kern.cam.dflags", &cam_dflags);
  199 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
  200         &cam_dflags, 0, "Enabled debug flags");
  201 u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
  202 TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
  203 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
  204         &cam_debug_delay, 0, "Delay in us after each debug message");
  205 
  206 /* Our boot-time initialization hook */
  207 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
  208 
  209 static moduledata_t cam_moduledata = {
  210         "cam",
  211         cam_module_event_handler,
  212         NULL
  213 };
  214 
  215 static int      xpt_init(void *);
  216 
  217 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
  218 MODULE_VERSION(cam, 1);
  219 
  220 
  221 static void             xpt_async_bcast(struct async_list *async_head,
  222                                         u_int32_t async_code,
  223                                         struct cam_path *path,
  224                                         void *async_arg);
  225 static path_id_t xptnextfreepathid(void);
  226 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
  227 static union ccb *xpt_get_ccb(struct cam_ed *device);
  228 static void      xpt_run_dev_allocq(struct cam_eb *bus);
  229 static void      xpt_run_dev_sendq(struct cam_eb *bus);
  230 static timeout_t xpt_release_devq_timeout;
  231 static void      xpt_release_simq_timeout(void *arg) __unused;
  232 static void      xpt_release_bus(struct cam_eb *bus);
  233 static void      xpt_release_devq_device(struct cam_ed *dev, cam_rl rl,
  234                     u_int count, int run_queue);
  235 static struct cam_et*
  236                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
  237 static void      xpt_release_target(struct cam_et *target);
  238 static struct cam_eb*
  239                  xpt_find_bus(path_id_t path_id);
  240 static struct cam_et*
  241                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
  242 static struct cam_ed*
  243                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
  244 static void      xpt_config(void *arg);
  245 static xpt_devicefunc_t xptpassannouncefunc;
  246 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
  247 static void      xptpoll(struct cam_sim *sim);
  248 static void      camisr(void *);
  249 static void      camisr_runqueue(void *);
  250 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
  251                                     u_int num_patterns, struct cam_eb *bus);
  252 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
  253                                        u_int num_patterns,
  254                                        struct cam_ed *device);
  255 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
  256                                        u_int num_patterns,
  257                                        struct cam_periph *periph);
  258 static xpt_busfunc_t    xptedtbusfunc;
  259 static xpt_targetfunc_t xptedttargetfunc;
  260 static xpt_devicefunc_t xptedtdevicefunc;
  261 static xpt_periphfunc_t xptedtperiphfunc;
  262 static xpt_pdrvfunc_t   xptplistpdrvfunc;
  263 static xpt_periphfunc_t xptplistperiphfunc;
  264 static int              xptedtmatch(struct ccb_dev_match *cdm);
  265 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
  266 static int              xptbustraverse(struct cam_eb *start_bus,
  267                                        xpt_busfunc_t *tr_func, void *arg);
  268 static int              xpttargettraverse(struct cam_eb *bus,
  269                                           struct cam_et *start_target,
  270                                           xpt_targetfunc_t *tr_func, void *arg);
  271 static int              xptdevicetraverse(struct cam_et *target,
  272                                           struct cam_ed *start_device,
  273                                           xpt_devicefunc_t *tr_func, void *arg);
  274 static int              xptperiphtraverse(struct cam_ed *device,
  275                                           struct cam_periph *start_periph,
  276                                           xpt_periphfunc_t *tr_func, void *arg);
  277 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
  278                                         xpt_pdrvfunc_t *tr_func, void *arg);
  279 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
  280                                             struct cam_periph *start_periph,
  281                                             xpt_periphfunc_t *tr_func,
  282                                             void *arg);
  283 static xpt_busfunc_t    xptdefbusfunc;
  284 static xpt_targetfunc_t xptdeftargetfunc;
  285 static xpt_devicefunc_t xptdefdevicefunc;
  286 static xpt_periphfunc_t xptdefperiphfunc;
  287 static void             xpt_finishconfig_task(void *context, int pending);
  288 static void             xpt_dev_async_default(u_int32_t async_code,
  289                                               struct cam_eb *bus,
  290                                               struct cam_et *target,
  291                                               struct cam_ed *device,
  292                                               void *async_arg);
  293 static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
  294                                                  struct cam_et *target,
  295                                                  lun_id_t lun_id);
  296 static xpt_devicefunc_t xptsetasyncfunc;
  297 static xpt_busfunc_t    xptsetasyncbusfunc;
  298 static cam_status       xptregister(struct cam_periph *periph,
  299                                     void *arg);
  300 static __inline int periph_is_queued(struct cam_periph *periph);
  301 static __inline int device_is_alloc_queued(struct cam_ed *device);
  302 static __inline int device_is_send_queued(struct cam_ed *device);
  303 
  304 static __inline int
  305 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
  306 {
  307         int retval;
  308 
  309         if ((dev->drvq.entries > 0) &&
  310             (dev->ccbq.devq_openings > 0) &&
  311             (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
  312                 CAMQ_GET_PRIO(&dev->drvq))) == 0)) {
  313                 /*
  314                  * The priority of a device waiting for CCB resources
  315                  * is that of the highest priority peripheral driver
  316                  * enqueued.
  317                  */
  318                 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
  319                                           &dev->alloc_ccb_entry.pinfo,
  320                                           CAMQ_GET_PRIO(&dev->drvq));
  321         } else {
  322                 retval = 0;
  323         }
  324 
  325         return (retval);
  326 }
  327 
  328 static __inline int
  329 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
  330 {
  331         int     retval;
  332 
  333         if ((dev->ccbq.queue.entries > 0) &&
  334             (dev->ccbq.dev_openings > 0) &&
  335             (cam_ccbq_frozen_top(&dev->ccbq) == 0)) {
  336                 /*
  337                  * The priority of a device waiting for controller
  338                  * resources is that of the highest priority CCB
  339                  * enqueued.
  340                  */
  341                 retval =
  342                     xpt_schedule_dev(&bus->sim->devq->send_queue,
  343                                      &dev->send_ccb_entry.pinfo,
  344                                      CAMQ_GET_PRIO(&dev->ccbq.queue));
  345         } else {
  346                 retval = 0;
  347         }
  348         return (retval);
  349 }
  350 
  351 static __inline int
  352 periph_is_queued(struct cam_periph *periph)
  353 {
  354         return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
  355 }
  356 
  357 static __inline int
  358 device_is_alloc_queued(struct cam_ed *device)
  359 {
  360         return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  361 }
  362 
  363 static __inline int
  364 device_is_send_queued(struct cam_ed *device)
  365 {
  366         return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  367 }
  368 
  369 static void
  370 xpt_periph_init()
  371 {
  372         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
  373 }
  374 
  375 static void
  376 xptdone(struct cam_periph *periph, union ccb *done_ccb)
  377 {
  378         /* Caller will release the CCB */
  379         wakeup(&done_ccb->ccb_h.cbfcnp);
  380 }
  381 
  382 static int
  383 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
  384 {
  385 
  386         /*
  387          * Only allow read-write access.
  388          */
  389         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
  390                 return(EPERM);
  391 
  392         /*
  393          * We don't allow nonblocking access.
  394          */
  395         if ((flags & O_NONBLOCK) != 0) {
  396                 printf("%s: can't do nonblocking access\n", devtoname(dev));
  397                 return(ENODEV);
  398         }
  399 
  400         /* Mark ourselves open */
  401         mtx_lock(&xsoftc.xpt_lock);
  402         xsoftc.flags |= XPT_FLAG_OPEN;
  403         mtx_unlock(&xsoftc.xpt_lock);
  404 
  405         return(0);
  406 }
  407 
  408 static int
  409 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
  410 {
  411 
  412         /* Mark ourselves closed */
  413         mtx_lock(&xsoftc.xpt_lock);
  414         xsoftc.flags &= ~XPT_FLAG_OPEN;
  415         mtx_unlock(&xsoftc.xpt_lock);
  416 
  417         return(0);
  418 }
  419 
  420 /*
  421  * Don't automatically grab the xpt softc lock here even though this is going
  422  * through the xpt device.  The xpt device is really just a back door for
  423  * accessing other devices and SIMs, so the right thing to do is to grab
  424  * the appropriate SIM lock once the bus/SIM is located.
  425  */
  426 static int
  427 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
  428 {
  429         int error;
  430 
  431         if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
  432                 error = cam_compat_ioctl(dev, &cmd, &addr, &flag, td);
  433                 if (error == EAGAIN)
  434                         return (xptdoioctl(dev, cmd, addr, flag, td));
  435         }
  436         return (error);
  437 }
  438         
  439 static int
  440 xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
  441 {
  442         int error;
  443 
  444         error = 0;
  445 
  446         switch(cmd) {
  447         /*
  448          * For the transport layer CAMIOCOMMAND ioctl, we really only want
  449          * to accept CCB types that don't quite make sense to send through a
  450          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
  451          * in the CAM spec.
  452          */
  453         case CAMIOCOMMAND: {
  454                 union ccb *ccb;
  455                 union ccb *inccb;
  456                 struct cam_eb *bus;
  457 
  458                 inccb = (union ccb *)addr;
  459 
  460                 bus = xpt_find_bus(inccb->ccb_h.path_id);
  461                 if (bus == NULL)
  462                         return (EINVAL);
  463 
  464                 switch (inccb->ccb_h.func_code) {
  465                 case XPT_SCAN_BUS:
  466                 case XPT_RESET_BUS:
  467                         if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
  468                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
  469                                 xpt_release_bus(bus);
  470                                 return (EINVAL);
  471                         }
  472                         break;
  473                 case XPT_SCAN_TGT:
  474                         if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
  475                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
  476                                 xpt_release_bus(bus);
  477                                 return (EINVAL);
  478                         }
  479                         break;
  480                 default:
  481                         break;
  482                 }
  483 
  484                 switch(inccb->ccb_h.func_code) {
  485                 case XPT_SCAN_BUS:
  486                 case XPT_RESET_BUS:
  487                 case XPT_PATH_INQ:
  488                 case XPT_ENG_INQ:
  489                 case XPT_SCAN_LUN:
  490                 case XPT_SCAN_TGT:
  491 
  492                         ccb = xpt_alloc_ccb();
  493 
  494                         CAM_SIM_LOCK(bus->sim);
  495 
  496                         /*
  497                          * Create a path using the bus, target, and lun the
  498                          * user passed in.
  499                          */
  500                         if (xpt_create_path(&ccb->ccb_h.path, NULL,
  501                                             inccb->ccb_h.path_id,
  502                                             inccb->ccb_h.target_id,
  503                                             inccb->ccb_h.target_lun) !=
  504                                             CAM_REQ_CMP){
  505                                 error = EINVAL;
  506                                 CAM_SIM_UNLOCK(bus->sim);
  507                                 xpt_free_ccb(ccb);
  508                                 break;
  509                         }
  510                         /* Ensure all of our fields are correct */
  511                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
  512                                       inccb->ccb_h.pinfo.priority);
  513                         xpt_merge_ccb(ccb, inccb);
  514                         ccb->ccb_h.cbfcnp = xptdone;
  515                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
  516                         bcopy(ccb, inccb, sizeof(union ccb));
  517                         xpt_free_path(ccb->ccb_h.path);
  518                         xpt_free_ccb(ccb);
  519                         CAM_SIM_UNLOCK(bus->sim);
  520                         break;
  521 
  522                 case XPT_DEBUG: {
  523                         union ccb ccb;
  524 
  525                         /*
  526                          * This is an immediate CCB, so it's okay to
  527                          * allocate it on the stack.
  528                          */
  529 
  530                         CAM_SIM_LOCK(bus->sim);
  531 
  532                         /*
  533                          * Create a path using the bus, target, and lun the
  534                          * user passed in.
  535                          */
  536                         if (xpt_create_path(&ccb.ccb_h.path, NULL,
  537                                             inccb->ccb_h.path_id,
  538                                             inccb->ccb_h.target_id,
  539                                             inccb->ccb_h.target_lun) !=
  540                                             CAM_REQ_CMP){
  541                                 error = EINVAL;
  542                                 CAM_SIM_UNLOCK(bus->sim);
  543                                 break;
  544                         }
  545                         /* Ensure all of our fields are correct */
  546                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
  547                                       inccb->ccb_h.pinfo.priority);
  548                         xpt_merge_ccb(&ccb, inccb);
  549                         ccb.ccb_h.cbfcnp = xptdone;
  550                         xpt_action(&ccb);
  551                         bcopy(&ccb, inccb, sizeof(union ccb));
  552                         xpt_free_path(ccb.ccb_h.path);
  553                         CAM_SIM_UNLOCK(bus->sim);
  554                         break;
  555 
  556                 }
  557                 case XPT_DEV_MATCH: {
  558                         struct cam_periph_map_info mapinfo;
  559                         struct cam_path *old_path;
  560 
  561                         /*
  562                          * We can't deal with physical addresses for this
  563                          * type of transaction.
  564                          */
  565                         if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
  566                             CAM_DATA_VADDR) {
  567                                 error = EINVAL;
  568                                 break;
  569                         }
  570 
  571                         /*
  572                          * Save this in case the caller had it set to
  573                          * something in particular.
  574                          */
  575                         old_path = inccb->ccb_h.path;
  576 
  577                         /*
  578                          * We really don't need a path for the matching
  579                          * code.  The path is needed because of the
  580                          * debugging statements in xpt_action().  They
  581                          * assume that the CCB has a valid path.
  582                          */
  583                         inccb->ccb_h.path = xpt_periph->path;
  584 
  585                         bzero(&mapinfo, sizeof(mapinfo));
  586 
  587                         /*
  588                          * Map the pattern and match buffers into kernel
  589                          * virtual address space.
  590                          */
  591                         error = cam_periph_mapmem(inccb, &mapinfo);
  592 
  593                         if (error) {
  594                                 inccb->ccb_h.path = old_path;
  595                                 break;
  596                         }
  597 
  598                         /*
  599                          * This is an immediate CCB, we can send it on directly.
  600                          */
  601                         CAM_SIM_LOCK(xpt_path_sim(xpt_periph->path));
  602                         xpt_action(inccb);
  603                         CAM_SIM_UNLOCK(xpt_path_sim(xpt_periph->path));
  604 
  605                         /*
  606                          * Map the buffers back into user space.
  607                          */
  608                         cam_periph_unmapmem(inccb, &mapinfo);
  609 
  610                         inccb->ccb_h.path = old_path;
  611 
  612                         error = 0;
  613                         break;
  614                 }
  615                 default:
  616                         error = ENOTSUP;
  617                         break;
  618                 }
  619                 xpt_release_bus(bus);
  620                 break;
  621         }
  622         /*
  623          * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
  624          * with the periphal driver name and unit name filled in.  The other
  625          * fields don't really matter as input.  The passthrough driver name
  626          * ("pass"), and unit number are passed back in the ccb.  The current
  627          * device generation number, and the index into the device peripheral
  628          * driver list, and the status are also passed back.  Note that
  629          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
  630          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
  631          * (or rather should be) impossible for the device peripheral driver
  632          * list to change since we look at the whole thing in one pass, and
  633          * we do it with lock protection.
  634          *
  635          */
  636         case CAMGETPASSTHRU: {
  637                 union ccb *ccb;
  638                 struct cam_periph *periph;
  639                 struct periph_driver **p_drv;
  640                 char   *name;
  641                 u_int unit;
  642                 u_int cur_generation;
  643                 int base_periph_found;
  644                 int splbreaknum;
  645 
  646                 ccb = (union ccb *)addr;
  647                 unit = ccb->cgdl.unit_number;
  648                 name = ccb->cgdl.periph_name;
  649                 /*
  650                  * Every 100 devices, we want to drop our lock protection to
  651                  * give the software interrupt handler a chance to run.
  652                  * Most systems won't run into this check, but this should
  653                  * avoid starvation in the software interrupt handler in
  654                  * large systems.
  655                  */
  656                 splbreaknum = 100;
  657 
  658                 ccb = (union ccb *)addr;
  659 
  660                 base_periph_found = 0;
  661 
  662                 /*
  663                  * Sanity check -- make sure we don't get a null peripheral
  664                  * driver name.
  665                  */
  666                 if (*ccb->cgdl.periph_name == '\0') {
  667                         error = EINVAL;
  668                         break;
  669                 }
  670 
  671                 /* Keep the list from changing while we traverse it */
  672                 xpt_lock_buses();
  673 ptstartover:
  674                 cur_generation = xsoftc.xpt_generation;
  675 
  676                 /* first find our driver in the list of drivers */
  677                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
  678                         if (strcmp((*p_drv)->driver_name, name) == 0)
  679                                 break;
  680 
  681                 if (*p_drv == NULL) {
  682                         xpt_unlock_buses();
  683                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  684                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  685                         *ccb->cgdl.periph_name = '\0';
  686                         ccb->cgdl.unit_number = 0;
  687                         error = ENOENT;
  688                         break;
  689                 }
  690 
  691                 /*
  692                  * Run through every peripheral instance of this driver
  693                  * and check to see whether it matches the unit passed
  694                  * in by the user.  If it does, get out of the loops and
  695                  * find the passthrough driver associated with that
  696                  * peripheral driver.
  697                  */
  698                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
  699                      periph = TAILQ_NEXT(periph, unit_links)) {
  700 
  701                         if (periph->unit_number == unit) {
  702                                 break;
  703                         } else if (--splbreaknum == 0) {
  704                                 xpt_unlock_buses();
  705                                 xpt_lock_buses();
  706                                 splbreaknum = 100;
  707                                 if (cur_generation != xsoftc.xpt_generation)
  708                                        goto ptstartover;
  709                         }
  710                 }
  711                 /*
  712                  * If we found the peripheral driver that the user passed
  713                  * in, go through all of the peripheral drivers for that
  714                  * particular device and look for a passthrough driver.
  715                  */
  716                 if (periph != NULL) {
  717                         struct cam_ed *device;
  718                         int i;
  719 
  720                         base_periph_found = 1;
  721                         device = periph->path->device;
  722                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
  723                              periph != NULL;
  724                              periph = SLIST_NEXT(periph, periph_links), i++) {
  725                                 /*
  726                                  * Check to see whether we have a
  727                                  * passthrough device or not.
  728                                  */
  729                                 if (strcmp(periph->periph_name, "pass") == 0) {
  730                                         /*
  731                                          * Fill in the getdevlist fields.
  732                                          */
  733                                         strcpy(ccb->cgdl.periph_name,
  734                                                periph->periph_name);
  735                                         ccb->cgdl.unit_number =
  736                                                 periph->unit_number;
  737                                         if (SLIST_NEXT(periph, periph_links))
  738                                                 ccb->cgdl.status =
  739                                                         CAM_GDEVLIST_MORE_DEVS;
  740                                         else
  741                                                 ccb->cgdl.status =
  742                                                        CAM_GDEVLIST_LAST_DEVICE;
  743                                         ccb->cgdl.generation =
  744                                                 device->generation;
  745                                         ccb->cgdl.index = i;
  746                                         /*
  747                                          * Fill in some CCB header fields
  748                                          * that the user may want.
  749                                          */
  750                                         ccb->ccb_h.path_id =
  751                                                 periph->path->bus->path_id;
  752                                         ccb->ccb_h.target_id =
  753                                                 periph->path->target->target_id;
  754                                         ccb->ccb_h.target_lun =
  755                                                 periph->path->device->lun_id;
  756                                         ccb->ccb_h.status = CAM_REQ_CMP;
  757                                         break;
  758                                 }
  759                         }
  760                 }
  761 
  762                 /*
  763                  * If the periph is null here, one of two things has
  764                  * happened.  The first possibility is that we couldn't
  765                  * find the unit number of the particular peripheral driver
  766                  * that the user is asking about.  e.g. the user asks for
  767                  * the passthrough driver for "da11".  We find the list of
  768                  * "da" peripherals all right, but there is no unit 11.
  769                  * The other possibility is that we went through the list
  770                  * of peripheral drivers attached to the device structure,
  771                  * but didn't find one with the name "pass".  Either way,
  772                  * we return ENOENT, since we couldn't find something.
  773                  */
  774                 if (periph == NULL) {
  775                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  776                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  777                         *ccb->cgdl.periph_name = '\0';
  778                         ccb->cgdl.unit_number = 0;
  779                         error = ENOENT;
  780                         /*
  781                          * It is unfortunate that this is even necessary,
  782                          * but there are many, many clueless users out there.
  783                          * If this is true, the user is looking for the
  784                          * passthrough driver, but doesn't have one in his
  785                          * kernel.
  786                          */
  787                         if (base_periph_found == 1) {
  788                                 printf("xptioctl: pass driver is not in the "
  789                                        "kernel\n");
  790                                 printf("xptioctl: put \"device pass\" in "
  791                                        "your kernel config file\n");
  792                         }
  793                 }
  794                 xpt_unlock_buses();
  795                 break;
  796                 }
  797         default:
  798                 error = ENOTTY;
  799                 break;
  800         }
  801 
  802         return(error);
  803 }
  804 
  805 static int
  806 cam_module_event_handler(module_t mod, int what, void *arg)
  807 {
  808         int error;
  809 
  810         switch (what) {
  811         case MOD_LOAD:
  812                 if ((error = xpt_init(NULL)) != 0)
  813                         return (error);
  814                 break;
  815         case MOD_UNLOAD:
  816                 return EBUSY;
  817         default:
  818                 return EOPNOTSUPP;
  819         }
  820 
  821         return 0;
  822 }
  823 
  824 static void
  825 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
  826 {
  827 
  828         if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
  829                 xpt_free_path(done_ccb->ccb_h.path);
  830                 xpt_free_ccb(done_ccb);
  831         } else {
  832                 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
  833                 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
  834         }
  835         xpt_release_boot();
  836 }
  837 
  838 /* thread to handle bus rescans */
  839 static void
  840 xpt_scanner_thread(void *dummy)
  841 {
  842         union ccb       *ccb;
  843         struct cam_sim  *sim;
  844 
  845         xpt_lock_buses();
  846         for (;;) {
  847                 if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
  848                         msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
  849                                "ccb_scanq", 0);
  850                 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
  851                         TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  852                         xpt_unlock_buses();
  853 
  854                         sim = ccb->ccb_h.path->bus->sim;
  855                         CAM_SIM_LOCK(sim);
  856                         xpt_action(ccb);
  857                         CAM_SIM_UNLOCK(sim);
  858 
  859                         xpt_lock_buses();
  860                 }
  861         }
  862 }
  863 
  864 void
  865 xpt_rescan(union ccb *ccb)
  866 {
  867         struct ccb_hdr *hdr;
  868 
  869         /* Prepare request */
  870         if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
  871             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
  872                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
  873         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
  874             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
  875                 ccb->ccb_h.func_code = XPT_SCAN_TGT;
  876         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
  877             ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
  878                 ccb->ccb_h.func_code = XPT_SCAN_LUN;
  879         else {
  880                 xpt_print(ccb->ccb_h.path, "illegal scan path\n");
  881                 xpt_free_path(ccb->ccb_h.path);
  882                 xpt_free_ccb(ccb);
  883                 return;
  884         }
  885         ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
  886         ccb->ccb_h.cbfcnp = xpt_rescan_done;
  887         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
  888         /* Don't make duplicate entries for the same paths. */
  889         xpt_lock_buses();
  890         if (ccb->ccb_h.ppriv_ptr1 == NULL) {
  891                 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
  892                         if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
  893                                 wakeup(&xsoftc.ccb_scanq);
  894                                 xpt_unlock_buses();
  895                                 xpt_print(ccb->ccb_h.path, "rescan already queued\n");
  896                                 xpt_free_path(ccb->ccb_h.path);
  897                                 xpt_free_ccb(ccb);
  898                                 return;
  899                         }
  900                 }
  901         }
  902         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  903         xsoftc.buses_to_config++;
  904         wakeup(&xsoftc.ccb_scanq);
  905         xpt_unlock_buses();
  906 }
  907 
  908 /* Functions accessed by the peripheral drivers */
  909 static int
  910 xpt_init(void *dummy)
  911 {
  912         struct cam_sim *xpt_sim;
  913         struct cam_path *path;
  914         struct cam_devq *devq;
  915         cam_status status;
  916 
  917         TAILQ_INIT(&xsoftc.xpt_busses);
  918         TAILQ_INIT(&cam_simq);
  919         TAILQ_INIT(&xsoftc.ccb_scanq);
  920         STAILQ_INIT(&xsoftc.highpowerq);
  921         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
  922 
  923         mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
  924         mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
  925         mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
  926 
  927 #ifdef CAM_BOOT_DELAY
  928         /*
  929          * Override this value at compile time to assist our users
  930          * who don't use loader to boot a kernel.
  931          */
  932         xsoftc.boot_delay = CAM_BOOT_DELAY;
  933 #endif
  934         /*
  935          * The xpt layer is, itself, the equivelent of a SIM.
  936          * Allow 16 ccbs in the ccb pool for it.  This should
  937          * give decent parallelism when we probe busses and
  938          * perform other XPT functions.
  939          */
  940         devq = cam_simq_alloc(16);
  941         xpt_sim = cam_sim_alloc(xptaction,
  942                                 xptpoll,
  943                                 "xpt",
  944                                 /*softc*/NULL,
  945                                 /*unit*/0,
  946                                 /*mtx*/&xsoftc.xpt_lock,
  947                                 /*max_dev_transactions*/0,
  948                                 /*max_tagged_dev_transactions*/0,
  949                                 devq);
  950         if (xpt_sim == NULL)
  951                 return (ENOMEM);
  952 
  953         mtx_lock(&xsoftc.xpt_lock);
  954         if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
  955                 mtx_unlock(&xsoftc.xpt_lock);
  956                 printf("xpt_init: xpt_bus_register failed with status %#x,"
  957                        " failing attach\n", status);
  958                 return (EINVAL);
  959         }
  960 
  961         /*
  962          * Looking at the XPT from the SIM layer, the XPT is
  963          * the equivelent of a peripheral driver.  Allocate
  964          * a peripheral driver entry for us.
  965          */
  966         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
  967                                       CAM_TARGET_WILDCARD,
  968                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
  969                 mtx_unlock(&xsoftc.xpt_lock);
  970                 printf("xpt_init: xpt_create_path failed with status %#x,"
  971                        " failing attach\n", status);
  972                 return (EINVAL);
  973         }
  974 
  975         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
  976                          path, NULL, 0, xpt_sim);
  977         xpt_free_path(path);
  978         mtx_unlock(&xsoftc.xpt_lock);
  979         /* Install our software interrupt handlers */
  980         swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
  981         /*
  982          * Register a callback for when interrupts are enabled.
  983          */
  984         xsoftc.xpt_config_hook =
  985             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
  986                                               M_CAMXPT, M_NOWAIT | M_ZERO);
  987         if (xsoftc.xpt_config_hook == NULL) {
  988                 printf("xpt_init: Cannot malloc config hook "
  989                        "- failing attach\n");
  990                 return (ENOMEM);
  991         }
  992         xsoftc.xpt_config_hook->ich_func = xpt_config;
  993         if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
  994                 free (xsoftc.xpt_config_hook, M_CAMXPT);
  995                 printf("xpt_init: config_intrhook_establish failed "
  996                        "- failing attach\n");
  997         }
  998 
  999         return (0);
 1000 }
 1001 
 1002 static cam_status
 1003 xptregister(struct cam_periph *periph, void *arg)
 1004 {
 1005         struct cam_sim *xpt_sim;
 1006 
 1007         if (periph == NULL) {
 1008                 printf("xptregister: periph was NULL!!\n");
 1009                 return(CAM_REQ_CMP_ERR);
 1010         }
 1011 
 1012         xpt_sim = (struct cam_sim *)arg;
 1013         xpt_sim->softc = periph;
 1014         xpt_periph = periph;
 1015         periph->softc = NULL;
 1016 
 1017         return(CAM_REQ_CMP);
 1018 }
 1019 
 1020 int32_t
 1021 xpt_add_periph(struct cam_periph *periph)
 1022 {
 1023         struct cam_ed *device;
 1024         int32_t  status;
 1025         struct periph_list *periph_head;
 1026 
 1027         mtx_assert(periph->sim->mtx, MA_OWNED);
 1028 
 1029         device = periph->path->device;
 1030 
 1031         periph_head = &device->periphs;
 1032 
 1033         status = CAM_REQ_CMP;
 1034 
 1035         if (device != NULL) {
 1036                 /*
 1037                  * Make room for this peripheral
 1038                  * so it will fit in the queue
 1039                  * when it's scheduled to run
 1040                  */
 1041                 status = camq_resize(&device->drvq,
 1042                                      device->drvq.array_size + 1);
 1043 
 1044                 device->generation++;
 1045 
 1046                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
 1047         }
 1048 
 1049         xpt_lock_buses();
 1050         xsoftc.xpt_generation++;
 1051         xpt_unlock_buses();
 1052 
 1053         return (status);
 1054 }
 1055 
 1056 void
 1057 xpt_remove_periph(struct cam_periph *periph, int topology_lock_held)
 1058 {
 1059         struct cam_ed *device;
 1060 
 1061         mtx_assert(periph->sim->mtx, MA_OWNED);
 1062 
 1063         device = periph->path->device;
 1064 
 1065         if (device != NULL) {
 1066                 struct periph_list *periph_head;
 1067 
 1068                 periph_head = &device->periphs;
 1069 
 1070                 /* Release the slot for this peripheral */
 1071                 camq_resize(&device->drvq, device->drvq.array_size - 1);
 1072 
 1073                 device->generation++;
 1074 
 1075                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
 1076         }
 1077 
 1078         if (topology_lock_held == 0)
 1079                 xpt_lock_buses();
 1080 
 1081         xsoftc.xpt_generation++;
 1082 
 1083         if (topology_lock_held == 0)
 1084                 xpt_unlock_buses();
 1085 }
 1086 
 1087 
 1088 void
 1089 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1090 {
 1091         struct  cam_path *path = periph->path;
 1092 
 1093         mtx_assert(periph->sim->mtx, MA_OWNED);
 1094 
 1095         printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n",
 1096                periph->periph_name, periph->unit_number,
 1097                path->bus->sim->sim_name,
 1098                path->bus->sim->unit_number,
 1099                path->bus->sim->bus_id,
 1100                path->bus->path_id,
 1101                path->target->target_id,
 1102                path->device->lun_id);
 1103         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1104         if (path->device->protocol == PROTO_SCSI)
 1105                 scsi_print_inquiry(&path->device->inq_data);
 1106         else if (path->device->protocol == PROTO_ATA ||
 1107             path->device->protocol == PROTO_SATAPM)
 1108                 ata_print_ident(&path->device->ident_data);
 1109         else if (path->device->protocol == PROTO_SEMB)
 1110                 semb_print_ident(
 1111                     (struct sep_identify_data *)&path->device->ident_data);
 1112         else
 1113                 printf("Unknown protocol device\n");
 1114         if (bootverbose && path->device->serial_num_len > 0) {
 1115                 /* Don't wrap the screen  - print only the first 60 chars */
 1116                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1117                        periph->unit_number, path->device->serial_num);
 1118         }
 1119         /* Announce transport details. */
 1120         (*(path->bus->xport->announce))(periph);
 1121         /* Announce command queueing. */
 1122         if (path->device->inq_flags & SID_CmdQue
 1123          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1124                 printf("%s%d: Command Queueing enabled\n",
 1125                        periph->periph_name, periph->unit_number);
 1126         }
 1127         /* Announce caller's details if they've passed in. */
 1128         if (announce_string != NULL)
 1129                 printf("%s%d: %s\n", periph->periph_name,
 1130                        periph->unit_number, announce_string);
 1131 }
 1132 
 1133 void
 1134 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
 1135 {
 1136         if (quirks != 0) {
 1137                 printf("%s%d: quirks=0x%b\n", periph->periph_name,
 1138                     periph->unit_number, quirks, bit_string);
 1139         }
 1140 }
 1141 
 1142 int
 1143 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
 1144 {
 1145         int ret = -1, l;
 1146         struct ccb_dev_advinfo cdai;
 1147         struct scsi_vpd_id_descriptor *idd;
 1148 
 1149         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 1150 
 1151         memset(&cdai, 0, sizeof(cdai));
 1152         xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 1153         cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 1154         cdai.bufsiz = len;
 1155 
 1156         if (!strcmp(attr, "GEOM::ident"))
 1157                 cdai.buftype = CDAI_TYPE_SERIAL_NUM;
 1158         else if (!strcmp(attr, "GEOM::physpath"))
 1159                 cdai.buftype = CDAI_TYPE_PHYS_PATH;
 1160         else if (!strcmp(attr, "GEOM::lunid")) {
 1161                 cdai.buftype = CDAI_TYPE_SCSI_DEVID;
 1162                 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
 1163         } else
 1164                 goto out;
 1165 
 1166         cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
 1167         if (cdai.buf == NULL) {
 1168                 ret = ENOMEM;
 1169                 goto out;
 1170         }
 1171         xpt_action((union ccb *)&cdai); /* can only be synchronous */
 1172         if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
 1173                 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
 1174         if (cdai.provsiz == 0)
 1175                 goto out;
 1176         if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
 1177                 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1178                     cdai.provsiz, scsi_devid_is_lun_naa);
 1179                 if (idd == NULL)
 1180                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1181                             cdai.provsiz, scsi_devid_is_lun_eui64);
 1182                 if (idd == NULL)
 1183                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1184                             cdai.provsiz, scsi_devid_is_lun_t10);
 1185                 if (idd == NULL)
 1186                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1187                             cdai.provsiz, scsi_devid_is_lun_name);
 1188                 if (idd == NULL)
 1189                         goto out;
 1190                 ret = 0;
 1191                 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII ||
 1192                     (idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
 1193                         l = strnlen(idd->identifier, idd->length);
 1194                         if (l < len) {
 1195                                 bcopy(idd->identifier, buf, l);
 1196                                 buf[l] = 0;
 1197                         } else
 1198                                 ret = EFAULT;
 1199                 } else {
 1200                         if (idd->length * 2 < len) {
 1201                                 for (l = 0; l < idd->length; l++)
 1202                                         sprintf(buf + l * 2, "%02x",
 1203                                             idd->identifier[l]);
 1204                         } else
 1205                                 ret = EFAULT;
 1206                 }
 1207         } else {
 1208                 ret = 0;
 1209                 if (strlcpy(buf, cdai.buf, len) >= len)
 1210                         ret = EFAULT;
 1211         }
 1212 
 1213 out:
 1214         if (cdai.buf != NULL)
 1215                 free(cdai.buf, M_CAMXPT);
 1216         return ret;
 1217 }
 1218 
 1219 static dev_match_ret
 1220 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1221             struct cam_eb *bus)
 1222 {
 1223         dev_match_ret retval;
 1224         int i;
 1225 
 1226         retval = DM_RET_NONE;
 1227 
 1228         /*
 1229          * If we aren't given something to match against, that's an error.
 1230          */
 1231         if (bus == NULL)
 1232                 return(DM_RET_ERROR);
 1233 
 1234         /*
 1235          * If there are no match entries, then this bus matches no
 1236          * matter what.
 1237          */
 1238         if ((patterns == NULL) || (num_patterns == 0))
 1239                 return(DM_RET_DESCEND | DM_RET_COPY);
 1240 
 1241         for (i = 0; i < num_patterns; i++) {
 1242                 struct bus_match_pattern *cur_pattern;
 1243 
 1244                 /*
 1245                  * If the pattern in question isn't for a bus node, we
 1246                  * aren't interested.  However, we do indicate to the
 1247                  * calling routine that we should continue descending the
 1248                  * tree, since the user wants to match against lower-level
 1249                  * EDT elements.
 1250                  */
 1251                 if (patterns[i].type != DEV_MATCH_BUS) {
 1252                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1253                                 retval |= DM_RET_DESCEND;
 1254                         continue;
 1255                 }
 1256 
 1257                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1258 
 1259                 /*
 1260                  * If they want to match any bus node, we give them any
 1261                  * device node.
 1262                  */
 1263                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1264                         /* set the copy flag */
 1265                         retval |= DM_RET_COPY;
 1266 
 1267                         /*
 1268                          * If we've already decided on an action, go ahead
 1269                          * and return.
 1270                          */
 1271                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1272                                 return(retval);
 1273                 }
 1274 
 1275                 /*
 1276                  * Not sure why someone would do this...
 1277                  */
 1278                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1279                         continue;
 1280 
 1281                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1282                  && (cur_pattern->path_id != bus->path_id))
 1283                         continue;
 1284 
 1285                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1286                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1287                         continue;
 1288 
 1289                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1290                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1291                         continue;
 1292 
 1293                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1294                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1295                              DEV_IDLEN) != 0))
 1296                         continue;
 1297 
 1298                 /*
 1299                  * If we get to this point, the user definitely wants
 1300                  * information on this bus.  So tell the caller to copy the
 1301                  * data out.
 1302                  */
 1303                 retval |= DM_RET_COPY;
 1304 
 1305                 /*
 1306                  * If the return action has been set to descend, then we
 1307                  * know that we've already seen a non-bus matching
 1308                  * expression, therefore we need to further descend the tree.
 1309                  * This won't change by continuing around the loop, so we
 1310                  * go ahead and return.  If we haven't seen a non-bus
 1311                  * matching expression, we keep going around the loop until
 1312                  * we exhaust the matching expressions.  We'll set the stop
 1313                  * flag once we fall out of the loop.
 1314                  */
 1315                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1316                         return(retval);
 1317         }
 1318 
 1319         /*
 1320          * If the return action hasn't been set to descend yet, that means
 1321          * we haven't seen anything other than bus matching patterns.  So
 1322          * tell the caller to stop descending the tree -- the user doesn't
 1323          * want to match against lower level tree elements.
 1324          */
 1325         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1326                 retval |= DM_RET_STOP;
 1327 
 1328         return(retval);
 1329 }
 1330 
 1331 static dev_match_ret
 1332 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1333                struct cam_ed *device)
 1334 {
 1335         dev_match_ret retval;
 1336         int i;
 1337 
 1338         retval = DM_RET_NONE;
 1339 
 1340         /*
 1341          * If we aren't given something to match against, that's an error.
 1342          */
 1343         if (device == NULL)
 1344                 return(DM_RET_ERROR);
 1345 
 1346         /*
 1347          * If there are no match entries, then this device matches no
 1348          * matter what.
 1349          */
 1350         if ((patterns == NULL) || (num_patterns == 0))
 1351                 return(DM_RET_DESCEND | DM_RET_COPY);
 1352 
 1353         for (i = 0; i < num_patterns; i++) {
 1354                 struct device_match_pattern *cur_pattern;
 1355                 struct scsi_vpd_device_id *device_id_page;
 1356 
 1357                 /*
 1358                  * If the pattern in question isn't for a device node, we
 1359                  * aren't interested.
 1360                  */
 1361                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1362                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1363                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1364                                 retval |= DM_RET_DESCEND;
 1365                         continue;
 1366                 }
 1367 
 1368                 cur_pattern = &patterns[i].pattern.device_pattern;
 1369 
 1370                 /* Error out if mutually exclusive options are specified. */ 
 1371                 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
 1372                  == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
 1373                         return(DM_RET_ERROR);
 1374 
 1375                 /*
 1376                  * If they want to match any device node, we give them any
 1377                  * device node.
 1378                  */
 1379                 if (cur_pattern->flags == DEV_MATCH_ANY)
 1380                         goto copy_dev_node;
 1381 
 1382                 /*
 1383                  * Not sure why someone would do this...
 1384                  */
 1385                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1386                         continue;
 1387 
 1388                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1389                  && (cur_pattern->path_id != device->target->bus->path_id))
 1390                         continue;
 1391 
 1392                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1393                  && (cur_pattern->target_id != device->target->target_id))
 1394                         continue;
 1395 
 1396                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1397                  && (cur_pattern->target_lun != device->lun_id))
 1398                         continue;
 1399 
 1400                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1401                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1402                                     (caddr_t)&cur_pattern->data.inq_pat,
 1403                                     1, sizeof(cur_pattern->data.inq_pat),
 1404                                     scsi_static_inquiry_match) == NULL))
 1405                         continue;
 1406 
 1407                 device_id_page = (struct scsi_vpd_device_id *)device->device_id;
 1408                 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
 1409                  && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
 1410                   || scsi_devid_match((uint8_t *)device_id_page->desc_list,
 1411                                       device->device_id_len
 1412                                     - SVPD_DEVICE_ID_HDR_LEN,
 1413                                       cur_pattern->data.devid_pat.id,
 1414                                       cur_pattern->data.devid_pat.id_len) != 0))
 1415                         continue;
 1416 
 1417 copy_dev_node:
 1418                 /*
 1419                  * If we get to this point, the user definitely wants
 1420                  * information on this device.  So tell the caller to copy
 1421                  * the data out.
 1422                  */
 1423                 retval |= DM_RET_COPY;
 1424 
 1425                 /*
 1426                  * If the return action has been set to descend, then we
 1427                  * know that we've already seen a peripheral matching
 1428                  * expression, therefore we need to further descend the tree.
 1429                  * This won't change by continuing around the loop, so we
 1430                  * go ahead and return.  If we haven't seen a peripheral
 1431                  * matching expression, we keep going around the loop until
 1432                  * we exhaust the matching expressions.  We'll set the stop
 1433                  * flag once we fall out of the loop.
 1434                  */
 1435                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1436                         return(retval);
 1437         }
 1438 
 1439         /*
 1440          * If the return action hasn't been set to descend yet, that means
 1441          * we haven't seen any peripheral matching patterns.  So tell the
 1442          * caller to stop descending the tree -- the user doesn't want to
 1443          * match against lower level tree elements.
 1444          */
 1445         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1446                 retval |= DM_RET_STOP;
 1447 
 1448         return(retval);
 1449 }
 1450 
 1451 /*
 1452  * Match a single peripheral against any number of match patterns.
 1453  */
 1454 static dev_match_ret
 1455 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1456                struct cam_periph *periph)
 1457 {
 1458         dev_match_ret retval;
 1459         int i;
 1460 
 1461         /*
 1462          * If we aren't given something to match against, that's an error.
 1463          */
 1464         if (periph == NULL)
 1465                 return(DM_RET_ERROR);
 1466 
 1467         /*
 1468          * If there are no match entries, then this peripheral matches no
 1469          * matter what.
 1470          */
 1471         if ((patterns == NULL) || (num_patterns == 0))
 1472                 return(DM_RET_STOP | DM_RET_COPY);
 1473 
 1474         /*
 1475          * There aren't any nodes below a peripheral node, so there's no
 1476          * reason to descend the tree any further.
 1477          */
 1478         retval = DM_RET_STOP;
 1479 
 1480         for (i = 0; i < num_patterns; i++) {
 1481                 struct periph_match_pattern *cur_pattern;
 1482 
 1483                 /*
 1484                  * If the pattern in question isn't for a peripheral, we
 1485                  * aren't interested.
 1486                  */
 1487                 if (patterns[i].type != DEV_MATCH_PERIPH)
 1488                         continue;
 1489 
 1490                 cur_pattern = &patterns[i].pattern.periph_pattern;
 1491 
 1492                 /*
 1493                  * If they want to match on anything, then we will do so.
 1494                  */
 1495                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 1496                         /* set the copy flag */
 1497                         retval |= DM_RET_COPY;
 1498 
 1499                         /*
 1500                          * We've already set the return action to stop,
 1501                          * since there are no nodes below peripherals in
 1502                          * the tree.
 1503                          */
 1504                         return(retval);
 1505                 }
 1506 
 1507                 /*
 1508                  * Not sure why someone would do this...
 1509                  */
 1510                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 1511                         continue;
 1512 
 1513                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 1514                  && (cur_pattern->path_id != periph->path->bus->path_id))
 1515                         continue;
 1516 
 1517                 /*
 1518                  * For the target and lun id's, we have to make sure the
 1519                  * target and lun pointers aren't NULL.  The xpt peripheral
 1520                  * has a wildcard target and device.
 1521                  */
 1522                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 1523                  && ((periph->path->target == NULL)
 1524                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 1525                         continue;
 1526 
 1527                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 1528                  && ((periph->path->device == NULL)
 1529                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 1530                         continue;
 1531 
 1532                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 1533                  && (cur_pattern->unit_number != periph->unit_number))
 1534                         continue;
 1535 
 1536                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 1537                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 1538                              DEV_IDLEN) != 0))
 1539                         continue;
 1540 
 1541                 /*
 1542                  * If we get to this point, the user definitely wants
 1543                  * information on this peripheral.  So tell the caller to
 1544                  * copy the data out.
 1545                  */
 1546                 retval |= DM_RET_COPY;
 1547 
 1548                 /*
 1549                  * The return action has already been set to stop, since
 1550                  * peripherals don't have any nodes below them in the EDT.
 1551                  */
 1552                 return(retval);
 1553         }
 1554 
 1555         /*
 1556          * If we get to this point, the peripheral that was passed in
 1557          * doesn't match any of the patterns.
 1558          */
 1559         return(retval);
 1560 }
 1561 
 1562 static int
 1563 xptedtbusfunc(struct cam_eb *bus, void *arg)
 1564 {
 1565         struct ccb_dev_match *cdm;
 1566         dev_match_ret retval;
 1567 
 1568         cdm = (struct ccb_dev_match *)arg;
 1569 
 1570         /*
 1571          * If our position is for something deeper in the tree, that means
 1572          * that we've already seen this node.  So, we keep going down.
 1573          */
 1574         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1575          && (cdm->pos.cookie.bus == bus)
 1576          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1577          && (cdm->pos.cookie.target != NULL))
 1578                 retval = DM_RET_DESCEND;
 1579         else
 1580                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 1581 
 1582         /*
 1583          * If we got an error, bail out of the search.
 1584          */
 1585         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1586                 cdm->status = CAM_DEV_MATCH_ERROR;
 1587                 return(0);
 1588         }
 1589 
 1590         /*
 1591          * If the copy flag is set, copy this bus out.
 1592          */
 1593         if (retval & DM_RET_COPY) {
 1594                 int spaceleft, j;
 1595 
 1596                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1597                         sizeof(struct dev_match_result));
 1598 
 1599                 /*
 1600                  * If we don't have enough space to put in another
 1601                  * match result, save our position and tell the
 1602                  * user there are more devices to check.
 1603                  */
 1604                 if (spaceleft < sizeof(struct dev_match_result)) {
 1605                         bzero(&cdm->pos, sizeof(cdm->pos));
 1606                         cdm->pos.position_type =
 1607                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 1608 
 1609                         cdm->pos.cookie.bus = bus;
 1610                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1611                                 xsoftc.bus_generation;
 1612                         cdm->status = CAM_DEV_MATCH_MORE;
 1613                         return(0);
 1614                 }
 1615                 j = cdm->num_matches;
 1616                 cdm->num_matches++;
 1617                 cdm->matches[j].type = DEV_MATCH_BUS;
 1618                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 1619                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 1620                 cdm->matches[j].result.bus_result.unit_number =
 1621                         bus->sim->unit_number;
 1622                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 1623                         bus->sim->sim_name, DEV_IDLEN);
 1624         }
 1625 
 1626         /*
 1627          * If the user is only interested in busses, there's no
 1628          * reason to descend to the next level in the tree.
 1629          */
 1630         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1631                 return(1);
 1632 
 1633         /*
 1634          * If there is a target generation recorded, check it to
 1635          * make sure the target list hasn't changed.
 1636          */
 1637         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1638          && (bus == cdm->pos.cookie.bus)
 1639          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1640          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 1641          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 1642              bus->generation)) {
 1643                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1644                 return(0);
 1645         }
 1646 
 1647         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1648          && (cdm->pos.cookie.bus == bus)
 1649          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1650          && (cdm->pos.cookie.target != NULL))
 1651                 return(xpttargettraverse(bus,
 1652                                         (struct cam_et *)cdm->pos.cookie.target,
 1653                                          xptedttargetfunc, arg));
 1654         else
 1655                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 1656 }
 1657 
 1658 static int
 1659 xptedttargetfunc(struct cam_et *target, void *arg)
 1660 {
 1661         struct ccb_dev_match *cdm;
 1662 
 1663         cdm = (struct ccb_dev_match *)arg;
 1664 
 1665         /*
 1666          * If there is a device list generation recorded, check it to
 1667          * make sure the device list hasn't changed.
 1668          */
 1669         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1670          && (cdm->pos.cookie.bus == target->bus)
 1671          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1672          && (cdm->pos.cookie.target == target)
 1673          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1674          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 1675          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 1676              target->generation)) {
 1677                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1678                 return(0);
 1679         }
 1680 
 1681         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1682          && (cdm->pos.cookie.bus == target->bus)
 1683          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1684          && (cdm->pos.cookie.target == target)
 1685          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1686          && (cdm->pos.cookie.device != NULL))
 1687                 return(xptdevicetraverse(target,
 1688                                         (struct cam_ed *)cdm->pos.cookie.device,
 1689                                          xptedtdevicefunc, arg));
 1690         else
 1691                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 1692 }
 1693 
 1694 static int
 1695 xptedtdevicefunc(struct cam_ed *device, void *arg)
 1696 {
 1697 
 1698         struct ccb_dev_match *cdm;
 1699         dev_match_ret retval;
 1700 
 1701         cdm = (struct ccb_dev_match *)arg;
 1702 
 1703         /*
 1704          * If our position is for something deeper in the tree, that means
 1705          * that we've already seen this node.  So, we keep going down.
 1706          */
 1707         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1708          && (cdm->pos.cookie.device == device)
 1709          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1710          && (cdm->pos.cookie.periph != NULL))
 1711                 retval = DM_RET_DESCEND;
 1712         else
 1713                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 1714                                         device);
 1715 
 1716         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1717                 cdm->status = CAM_DEV_MATCH_ERROR;
 1718                 return(0);
 1719         }
 1720 
 1721         /*
 1722          * If the copy flag is set, copy this device out.
 1723          */
 1724         if (retval & DM_RET_COPY) {
 1725                 int spaceleft, j;
 1726 
 1727                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1728                         sizeof(struct dev_match_result));
 1729 
 1730                 /*
 1731                  * If we don't have enough space to put in another
 1732                  * match result, save our position and tell the
 1733                  * user there are more devices to check.
 1734                  */
 1735                 if (spaceleft < sizeof(struct dev_match_result)) {
 1736                         bzero(&cdm->pos, sizeof(cdm->pos));
 1737                         cdm->pos.position_type =
 1738                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1739                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 1740 
 1741                         cdm->pos.cookie.bus = device->target->bus;
 1742                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1743                                 xsoftc.bus_generation;
 1744                         cdm->pos.cookie.target = device->target;
 1745                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1746                                 device->target->bus->generation;
 1747                         cdm->pos.cookie.device = device;
 1748                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1749                                 device->target->generation;
 1750                         cdm->status = CAM_DEV_MATCH_MORE;
 1751                         return(0);
 1752                 }
 1753                 j = cdm->num_matches;
 1754                 cdm->num_matches++;
 1755                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 1756                 cdm->matches[j].result.device_result.path_id =
 1757                         device->target->bus->path_id;
 1758                 cdm->matches[j].result.device_result.target_id =
 1759                         device->target->target_id;
 1760                 cdm->matches[j].result.device_result.target_lun =
 1761                         device->lun_id;
 1762                 cdm->matches[j].result.device_result.protocol =
 1763                         device->protocol;
 1764                 bcopy(&device->inq_data,
 1765                       &cdm->matches[j].result.device_result.inq_data,
 1766                       sizeof(struct scsi_inquiry_data));
 1767                 bcopy(&device->ident_data,
 1768                       &cdm->matches[j].result.device_result.ident_data,
 1769                       sizeof(struct ata_params));
 1770 
 1771                 /* Let the user know whether this device is unconfigured */
 1772                 if (device->flags & CAM_DEV_UNCONFIGURED)
 1773                         cdm->matches[j].result.device_result.flags =
 1774                                 DEV_RESULT_UNCONFIGURED;
 1775                 else
 1776                         cdm->matches[j].result.device_result.flags =
 1777                                 DEV_RESULT_NOFLAG;
 1778         }
 1779 
 1780         /*
 1781          * If the user isn't interested in peripherals, don't descend
 1782          * the tree any further.
 1783          */
 1784         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1785                 return(1);
 1786 
 1787         /*
 1788          * If there is a peripheral list generation recorded, make sure
 1789          * it hasn't changed.
 1790          */
 1791         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1792          && (device->target->bus == cdm->pos.cookie.bus)
 1793          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1794          && (device->target == cdm->pos.cookie.target)
 1795          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1796          && (device == cdm->pos.cookie.device)
 1797          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1798          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 1799          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1800              device->generation)){
 1801                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1802                 return(0);
 1803         }
 1804 
 1805         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1806          && (cdm->pos.cookie.bus == device->target->bus)
 1807          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1808          && (cdm->pos.cookie.target == device->target)
 1809          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1810          && (cdm->pos.cookie.device == device)
 1811          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1812          && (cdm->pos.cookie.periph != NULL))
 1813                 return(xptperiphtraverse(device,
 1814                                 (struct cam_periph *)cdm->pos.cookie.periph,
 1815                                 xptedtperiphfunc, arg));
 1816         else
 1817                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 1818 }
 1819 
 1820 static int
 1821 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 1822 {
 1823         struct ccb_dev_match *cdm;
 1824         dev_match_ret retval;
 1825 
 1826         cdm = (struct ccb_dev_match *)arg;
 1827 
 1828         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1829 
 1830         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1831                 cdm->status = CAM_DEV_MATCH_ERROR;
 1832                 return(0);
 1833         }
 1834 
 1835         /*
 1836          * If the copy flag is set, copy this peripheral out.
 1837          */
 1838         if (retval & DM_RET_COPY) {
 1839                 int spaceleft, j;
 1840 
 1841                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1842                         sizeof(struct dev_match_result));
 1843 
 1844                 /*
 1845                  * If we don't have enough space to put in another
 1846                  * match result, save our position and tell the
 1847                  * user there are more devices to check.
 1848                  */
 1849                 if (spaceleft < sizeof(struct dev_match_result)) {
 1850                         bzero(&cdm->pos, sizeof(cdm->pos));
 1851                         cdm->pos.position_type =
 1852                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1853                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 1854                                 CAM_DEV_POS_PERIPH;
 1855 
 1856                         cdm->pos.cookie.bus = periph->path->bus;
 1857                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1858                                 xsoftc.bus_generation;
 1859                         cdm->pos.cookie.target = periph->path->target;
 1860                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1861                                 periph->path->bus->generation;
 1862                         cdm->pos.cookie.device = periph->path->device;
 1863                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1864                                 periph->path->target->generation;
 1865                         cdm->pos.cookie.periph = periph;
 1866                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1867                                 periph->path->device->generation;
 1868                         cdm->status = CAM_DEV_MATCH_MORE;
 1869                         return(0);
 1870                 }
 1871 
 1872                 j = cdm->num_matches;
 1873                 cdm->num_matches++;
 1874                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1875                 cdm->matches[j].result.periph_result.path_id =
 1876                         periph->path->bus->path_id;
 1877                 cdm->matches[j].result.periph_result.target_id =
 1878                         periph->path->target->target_id;
 1879                 cdm->matches[j].result.periph_result.target_lun =
 1880                         periph->path->device->lun_id;
 1881                 cdm->matches[j].result.periph_result.unit_number =
 1882                         periph->unit_number;
 1883                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1884                         periph->periph_name, DEV_IDLEN);
 1885         }
 1886 
 1887         return(1);
 1888 }
 1889 
 1890 static int
 1891 xptedtmatch(struct ccb_dev_match *cdm)
 1892 {
 1893         int ret;
 1894 
 1895         cdm->num_matches = 0;
 1896 
 1897         /*
 1898          * Check the bus list generation.  If it has changed, the user
 1899          * needs to reset everything and start over.
 1900          */
 1901         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1902          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 1903          && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
 1904                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1905                 return(0);
 1906         }
 1907 
 1908         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1909          && (cdm->pos.cookie.bus != NULL))
 1910                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 1911                                      xptedtbusfunc, cdm);
 1912         else
 1913                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 1914 
 1915         /*
 1916          * If we get back 0, that means that we had to stop before fully
 1917          * traversing the EDT.  It also means that one of the subroutines
 1918          * has set the status field to the proper value.  If we get back 1,
 1919          * we've fully traversed the EDT and copied out any matching entries.
 1920          */
 1921         if (ret == 1)
 1922                 cdm->status = CAM_DEV_MATCH_LAST;
 1923 
 1924         return(ret);
 1925 }
 1926 
 1927 static int
 1928 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 1929 {
 1930         struct ccb_dev_match *cdm;
 1931 
 1932         cdm = (struct ccb_dev_match *)arg;
 1933 
 1934         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1935          && (cdm->pos.cookie.pdrv == pdrv)
 1936          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1937          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 1938          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1939              (*pdrv)->generation)) {
 1940                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1941                 return(0);
 1942         }
 1943 
 1944         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1945          && (cdm->pos.cookie.pdrv == pdrv)
 1946          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1947          && (cdm->pos.cookie.periph != NULL))
 1948                 return(xptpdperiphtraverse(pdrv,
 1949                                 (struct cam_periph *)cdm->pos.cookie.periph,
 1950                                 xptplistperiphfunc, arg));
 1951         else
 1952                 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
 1953 }
 1954 
 1955 static int
 1956 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 1957 {
 1958         struct ccb_dev_match *cdm;
 1959         dev_match_ret retval;
 1960 
 1961         cdm = (struct ccb_dev_match *)arg;
 1962 
 1963         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1964 
 1965         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1966                 cdm->status = CAM_DEV_MATCH_ERROR;
 1967                 return(0);
 1968         }
 1969 
 1970         /*
 1971          * If the copy flag is set, copy this peripheral out.
 1972          */
 1973         if (retval & DM_RET_COPY) {
 1974                 int spaceleft, j;
 1975 
 1976                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1977                         sizeof(struct dev_match_result));
 1978 
 1979                 /*
 1980                  * If we don't have enough space to put in another
 1981                  * match result, save our position and tell the
 1982                  * user there are more devices to check.
 1983                  */
 1984                 if (spaceleft < sizeof(struct dev_match_result)) {
 1985                         struct periph_driver **pdrv;
 1986 
 1987                         pdrv = NULL;
 1988                         bzero(&cdm->pos, sizeof(cdm->pos));
 1989                         cdm->pos.position_type =
 1990                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 1991                                 CAM_DEV_POS_PERIPH;
 1992 
 1993                         /*
 1994                          * This may look a bit non-sensical, but it is
 1995                          * actually quite logical.  There are very few
 1996                          * peripheral drivers, and bloating every peripheral
 1997                          * structure with a pointer back to its parent
 1998                          * peripheral driver linker set entry would cost
 1999                          * more in the long run than doing this quick lookup.
 2000                          */
 2001                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 2002                                 if (strcmp((*pdrv)->driver_name,
 2003                                     periph->periph_name) == 0)
 2004                                         break;
 2005                         }
 2006 
 2007                         if (*pdrv == NULL) {
 2008                                 cdm->status = CAM_DEV_MATCH_ERROR;
 2009                                 return(0);
 2010                         }
 2011 
 2012                         cdm->pos.cookie.pdrv = pdrv;
 2013                         /*
 2014                          * The periph generation slot does double duty, as
 2015                          * does the periph pointer slot.  They are used for
 2016                          * both edt and pdrv lookups and positioning.
 2017                          */
 2018                         cdm->pos.cookie.periph = periph;
 2019                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2020                                 (*pdrv)->generation;
 2021                         cdm->status = CAM_DEV_MATCH_MORE;
 2022                         return(0);
 2023                 }
 2024 
 2025                 j = cdm->num_matches;
 2026                 cdm->num_matches++;
 2027                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2028                 cdm->matches[j].result.periph_result.path_id =
 2029                         periph->path->bus->path_id;
 2030 
 2031                 /*
 2032                  * The transport layer peripheral doesn't have a target or
 2033                  * lun.
 2034                  */
 2035                 if (periph->path->target)
 2036                         cdm->matches[j].result.periph_result.target_id =
 2037                                 periph->path->target->target_id;
 2038                 else
 2039                         cdm->matches[j].result.periph_result.target_id = -1;
 2040 
 2041                 if (periph->path->device)
 2042                         cdm->matches[j].result.periph_result.target_lun =
 2043                                 periph->path->device->lun_id;
 2044                 else
 2045                         cdm->matches[j].result.periph_result.target_lun = -1;
 2046 
 2047                 cdm->matches[j].result.periph_result.unit_number =
 2048                         periph->unit_number;
 2049                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2050                         periph->periph_name, DEV_IDLEN);
 2051         }
 2052 
 2053         return(1);
 2054 }
 2055 
 2056 static int
 2057 xptperiphlistmatch(struct ccb_dev_match *cdm)
 2058 {
 2059         int ret;
 2060 
 2061         cdm->num_matches = 0;
 2062 
 2063         /*
 2064          * At this point in the edt traversal function, we check the bus
 2065          * list generation to make sure that no busses have been added or
 2066          * removed since the user last sent a XPT_DEV_MATCH ccb through.
 2067          * For the peripheral driver list traversal function, however, we
 2068          * don't have to worry about new peripheral driver types coming or
 2069          * going; they're in a linker set, and therefore can't change
 2070          * without a recompile.
 2071          */
 2072 
 2073         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2074          && (cdm->pos.cookie.pdrv != NULL))
 2075                 ret = xptpdrvtraverse(
 2076                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2077                                 xptplistpdrvfunc, cdm);
 2078         else
 2079                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2080 
 2081         /*
 2082          * If we get back 0, that means that we had to stop before fully
 2083          * traversing the peripheral driver tree.  It also means that one of
 2084          * the subroutines has set the status field to the proper value.  If
 2085          * we get back 1, we've fully traversed the EDT and copied out any
 2086          * matching entries.
 2087          */
 2088         if (ret == 1)
 2089                 cdm->status = CAM_DEV_MATCH_LAST;
 2090 
 2091         return(ret);
 2092 }
 2093 
 2094 static int
 2095 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2096 {
 2097         struct cam_eb *bus, *next_bus;
 2098         int retval;
 2099 
 2100         retval = 1;
 2101 
 2102         xpt_lock_buses();
 2103         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
 2104              bus != NULL;
 2105              bus = next_bus) {
 2106 
 2107                 bus->refcount++;
 2108 
 2109                 /*
 2110                  * XXX The locking here is obviously very complex.  We
 2111                  * should work to simplify it.
 2112                  */
 2113                 xpt_unlock_buses();
 2114                 CAM_SIM_LOCK(bus->sim);
 2115                 retval = tr_func(bus, arg);
 2116                 CAM_SIM_UNLOCK(bus->sim);
 2117 
 2118                 xpt_lock_buses();
 2119                 next_bus = TAILQ_NEXT(bus, links);
 2120                 xpt_unlock_buses();
 2121 
 2122                 xpt_release_bus(bus);
 2123 
 2124                 if (retval == 0)
 2125                         return(retval);
 2126                 xpt_lock_buses();
 2127         }
 2128         xpt_unlock_buses();
 2129 
 2130         return(retval);
 2131 }
 2132 
 2133 int
 2134 xpt_sim_opened(struct cam_sim *sim)
 2135 {
 2136         struct cam_eb *bus;
 2137         struct cam_et *target;
 2138         struct cam_ed *device;
 2139         struct cam_periph *periph;
 2140 
 2141         KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
 2142         mtx_assert(sim->mtx, MA_OWNED);
 2143 
 2144         xpt_lock_buses();
 2145         TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
 2146                 if (bus->sim != sim)
 2147                         continue;
 2148 
 2149                 TAILQ_FOREACH(target, &bus->et_entries, links) {
 2150                         TAILQ_FOREACH(device, &target->ed_entries, links) {
 2151                                 SLIST_FOREACH(periph, &device->periphs,
 2152                                     periph_links) {
 2153                                         if (periph->refcount > 0) {
 2154                                                 xpt_unlock_buses();
 2155                                                 return (1);
 2156                                         }
 2157                                 }
 2158                         }
 2159                 }
 2160         }
 2161 
 2162         xpt_unlock_buses();
 2163         return (0);
 2164 }
 2165 
 2166 static int
 2167 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2168                   xpt_targetfunc_t *tr_func, void *arg)
 2169 {
 2170         struct cam_et *target, *next_target;
 2171         int retval;
 2172 
 2173         mtx_assert(bus->sim->mtx, MA_OWNED);
 2174         retval = 1;
 2175         for (target = (start_target ? start_target :
 2176                        TAILQ_FIRST(&bus->et_entries));
 2177              target != NULL; target = next_target) {
 2178 
 2179                 target->refcount++;
 2180 
 2181                 retval = tr_func(target, arg);
 2182 
 2183                 next_target = TAILQ_NEXT(target, links);
 2184 
 2185                 xpt_release_target(target);
 2186 
 2187                 if (retval == 0)
 2188                         return(retval);
 2189         }
 2190 
 2191         return(retval);
 2192 }
 2193 
 2194 static int
 2195 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2196                   xpt_devicefunc_t *tr_func, void *arg)
 2197 {
 2198         struct cam_ed *device, *next_device;
 2199         int retval;
 2200 
 2201         mtx_assert(target->bus->sim->mtx, MA_OWNED);
 2202         retval = 1;
 2203         for (device = (start_device ? start_device :
 2204                        TAILQ_FIRST(&target->ed_entries));
 2205              device != NULL;
 2206              device = next_device) {
 2207 
 2208                 /*
 2209                  * Hold a reference so the current device does not go away
 2210                  * on us.
 2211                  */
 2212                 device->refcount++;
 2213 
 2214                 retval = tr_func(device, arg);
 2215 
 2216                 /*
 2217                  * Grab our next pointer before we release the current
 2218                  * device.
 2219                  */
 2220                 next_device = TAILQ_NEXT(device, links);
 2221 
 2222                 xpt_release_device(device);
 2223 
 2224                 if (retval == 0)
 2225                         return(retval);
 2226         }
 2227 
 2228         return(retval);
 2229 }
 2230 
 2231 static int
 2232 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2233                   xpt_periphfunc_t *tr_func, void *arg)
 2234 {
 2235         struct cam_periph *periph, *next_periph;
 2236         int retval;
 2237 
 2238         retval = 1;
 2239 
 2240         mtx_assert(device->sim->mtx, MA_OWNED);
 2241         xpt_lock_buses();
 2242         for (periph = (start_periph ? start_periph :
 2243                        SLIST_FIRST(&device->periphs));
 2244              periph != NULL;
 2245              periph = next_periph) {
 2246 
 2247 
 2248                 /*
 2249                  * In this case, we want to show peripherals that have been
 2250                  * invalidated, but not peripherals that are scheduled to
 2251                  * be freed.  So instead of calling cam_periph_acquire(),
 2252                  * which will fail if the periph has been invalidated, we
 2253                  * just check for the free flag here.  If it is in the
 2254                  * process of being freed, we skip to the next periph.
 2255                  */
 2256                 if (periph->flags & CAM_PERIPH_FREE) {
 2257                         next_periph = SLIST_NEXT(periph, periph_links);
 2258                         continue;
 2259                 }
 2260 
 2261                 /*
 2262                  * Acquire a reference to this periph while we call the
 2263                  * traversal function, so it can't go away.
 2264                  */
 2265                 periph->refcount++;
 2266 
 2267                 retval = tr_func(periph, arg);
 2268 
 2269                 /*
 2270                  * Grab the next peripheral before we release this one, so
 2271                  * our next pointer is still valid.
 2272                  */
 2273                 next_periph = SLIST_NEXT(periph, periph_links);
 2274 
 2275                 cam_periph_release_locked_buses(periph);
 2276 
 2277                 if (retval == 0)
 2278                         goto bailout_done;
 2279         }
 2280 
 2281 bailout_done:
 2282 
 2283         xpt_unlock_buses();
 2284 
 2285         return(retval);
 2286 }
 2287 
 2288 static int
 2289 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2290                 xpt_pdrvfunc_t *tr_func, void *arg)
 2291 {
 2292         struct periph_driver **pdrv;
 2293         int retval;
 2294 
 2295         retval = 1;
 2296 
 2297         /*
 2298          * We don't traverse the peripheral driver list like we do the
 2299          * other lists, because it is a linker set, and therefore cannot be
 2300          * changed during runtime.  If the peripheral driver list is ever
 2301          * re-done to be something other than a linker set (i.e. it can
 2302          * change while the system is running), the list traversal should
 2303          * be modified to work like the other traversal functions.
 2304          */
 2305         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2306              *pdrv != NULL; pdrv++) {
 2307                 retval = tr_func(pdrv, arg);
 2308 
 2309                 if (retval == 0)
 2310                         return(retval);
 2311         }
 2312 
 2313         return(retval);
 2314 }
 2315 
 2316 static int
 2317 xptpdperiphtraverse(struct periph_driver **pdrv,
 2318                     struct cam_periph *start_periph,
 2319                     xpt_periphfunc_t *tr_func, void *arg)
 2320 {
 2321         struct cam_periph *periph, *next_periph;
 2322         struct cam_sim *sim;
 2323         int retval;
 2324 
 2325         retval = 1;
 2326 
 2327         xpt_lock_buses();
 2328         for (periph = (start_periph ? start_periph :
 2329              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2330              periph = next_periph) {
 2331 
 2332 
 2333                 /*
 2334                  * In this case, we want to show peripherals that have been
 2335                  * invalidated, but not peripherals that are scheduled to
 2336                  * be freed.  So instead of calling cam_periph_acquire(),
 2337                  * which will fail if the periph has been invalidated, we
 2338                  * just check for the free flag here.  If it is free, we
 2339                  * skip to the next periph.
 2340                  */
 2341                 if (periph->flags & CAM_PERIPH_FREE) {
 2342                         next_periph = TAILQ_NEXT(periph, unit_links);
 2343                         continue;
 2344                 }
 2345 
 2346                 /*
 2347                  * Acquire a reference to this periph while we call the
 2348                  * traversal function, so it can't go away.
 2349                  */
 2350                 periph->refcount++;
 2351                 sim = periph->sim;
 2352                 xpt_unlock_buses();
 2353                 CAM_SIM_LOCK(sim);
 2354                 xpt_lock_buses();
 2355                 retval = tr_func(periph, arg);
 2356 
 2357                 /*
 2358                  * Grab the next peripheral before we release this one, so
 2359                  * our next pointer is still valid.
 2360                  */
 2361                 next_periph = TAILQ_NEXT(periph, unit_links);
 2362 
 2363                 cam_periph_release_locked_buses(periph);
 2364                 CAM_SIM_UNLOCK(sim);
 2365 
 2366                 if (retval == 0)
 2367                         goto bailout_done;
 2368         }
 2369 bailout_done:
 2370 
 2371         xpt_unlock_buses();
 2372 
 2373         return(retval);
 2374 }
 2375 
 2376 static int
 2377 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2378 {
 2379         struct xpt_traverse_config *tr_config;
 2380 
 2381         tr_config = (struct xpt_traverse_config *)arg;
 2382 
 2383         if (tr_config->depth == XPT_DEPTH_BUS) {
 2384                 xpt_busfunc_t *tr_func;
 2385 
 2386                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2387 
 2388                 return(tr_func(bus, tr_config->tr_arg));
 2389         } else
 2390                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2391 }
 2392 
 2393 static int
 2394 xptdeftargetfunc(struct cam_et *target, void *arg)
 2395 {
 2396         struct xpt_traverse_config *tr_config;
 2397 
 2398         tr_config = (struct xpt_traverse_config *)arg;
 2399 
 2400         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2401                 xpt_targetfunc_t *tr_func;
 2402 
 2403                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2404 
 2405                 return(tr_func(target, tr_config->tr_arg));
 2406         } else
 2407                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2408 }
 2409 
 2410 static int
 2411 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2412 {
 2413         struct xpt_traverse_config *tr_config;
 2414 
 2415         tr_config = (struct xpt_traverse_config *)arg;
 2416 
 2417         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2418                 xpt_devicefunc_t *tr_func;
 2419 
 2420                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2421 
 2422                 return(tr_func(device, tr_config->tr_arg));
 2423         } else
 2424                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2425 }
 2426 
 2427 static int
 2428 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2429 {
 2430         struct xpt_traverse_config *tr_config;
 2431         xpt_periphfunc_t *tr_func;
 2432 
 2433         tr_config = (struct xpt_traverse_config *)arg;
 2434 
 2435         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2436 
 2437         /*
 2438          * Unlike the other default functions, we don't check for depth
 2439          * here.  The peripheral driver level is the last level in the EDT,
 2440          * so if we're here, we should execute the function in question.
 2441          */
 2442         return(tr_func(periph, tr_config->tr_arg));
 2443 }
 2444 
 2445 /*
 2446  * Execute the given function for every bus in the EDT.
 2447  */
 2448 static int
 2449 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2450 {
 2451         struct xpt_traverse_config tr_config;
 2452 
 2453         tr_config.depth = XPT_DEPTH_BUS;
 2454         tr_config.tr_func = tr_func;
 2455         tr_config.tr_arg = arg;
 2456 
 2457         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2458 }
 2459 
 2460 /*
 2461  * Execute the given function for every device in the EDT.
 2462  */
 2463 static int
 2464 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2465 {
 2466         struct xpt_traverse_config tr_config;
 2467 
 2468         tr_config.depth = XPT_DEPTH_DEVICE;
 2469         tr_config.tr_func = tr_func;
 2470         tr_config.tr_arg = arg;
 2471 
 2472         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2473 }
 2474 
 2475 static int
 2476 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2477 {
 2478         struct cam_path path;
 2479         struct ccb_getdev cgd;
 2480         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2481 
 2482         /*
 2483          * Don't report unconfigured devices (Wildcard devs,
 2484          * devices only for target mode, device instances
 2485          * that have been invalidated but are waiting for
 2486          * their last reference count to be released).
 2487          */
 2488         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2489                 return (1);
 2490 
 2491         xpt_compile_path(&path,
 2492                          NULL,
 2493                          device->target->bus->path_id,
 2494                          device->target->target_id,
 2495                          device->lun_id);
 2496         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2497         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2498         xpt_action((union ccb *)&cgd);
 2499         csa->callback(csa->callback_arg,
 2500                             AC_FOUND_DEVICE,
 2501                             &path, &cgd);
 2502         xpt_release_path(&path);
 2503 
 2504         return(1);
 2505 }
 2506 
 2507 static int
 2508 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2509 {
 2510         struct cam_path path;
 2511         struct ccb_pathinq cpi;
 2512         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2513 
 2514         xpt_compile_path(&path, /*periph*/NULL,
 2515                          bus->sim->path_id,
 2516                          CAM_TARGET_WILDCARD,
 2517                          CAM_LUN_WILDCARD);
 2518         xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2519         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2520         xpt_action((union ccb *)&cpi);
 2521         csa->callback(csa->callback_arg,
 2522                             AC_PATH_REGISTERED,
 2523                             &path, &cpi);
 2524         xpt_release_path(&path);
 2525 
 2526         return(1);
 2527 }
 2528 
 2529 void
 2530 xpt_action(union ccb *start_ccb)
 2531 {
 2532 
 2533         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2534 
 2535         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2536         (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
 2537 }
 2538 
 2539 void
 2540 xpt_action_default(union ccb *start_ccb)
 2541 {
 2542         struct cam_path *path;
 2543 
 2544         path = start_ccb->ccb_h.path;
 2545         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
 2546 
 2547         switch (start_ccb->ccb_h.func_code) {
 2548         case XPT_SCSI_IO:
 2549         {
 2550                 struct cam_ed *device;
 2551 
 2552                 /*
 2553                  * For the sake of compatibility with SCSI-1
 2554                  * devices that may not understand the identify
 2555                  * message, we include lun information in the
 2556                  * second byte of all commands.  SCSI-1 specifies
 2557                  * that luns are a 3 bit value and reserves only 3
 2558                  * bits for lun information in the CDB.  Later
 2559                  * revisions of the SCSI spec allow for more than 8
 2560                  * luns, but have deprecated lun information in the
 2561                  * CDB.  So, if the lun won't fit, we must omit.
 2562                  *
 2563                  * Also be aware that during initial probing for devices,
 2564                  * the inquiry information is unknown but initialized to 0.
 2565                  * This means that this code will be exercised while probing
 2566                  * devices with an ANSI revision greater than 2.
 2567                  */
 2568                 device = path->device;
 2569                 if (device->protocol_version <= SCSI_REV_2
 2570                  && start_ccb->ccb_h.target_lun < 8
 2571                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 2572 
 2573                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 2574                             start_ccb->ccb_h.target_lun << 5;
 2575                 }
 2576                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 2577         }
 2578         /* FALLTHROUGH */
 2579         case XPT_TARGET_IO:
 2580         case XPT_CONT_TARGET_IO:
 2581                 start_ccb->csio.sense_resid = 0;
 2582                 start_ccb->csio.resid = 0;
 2583                 /* FALLTHROUGH */
 2584         case XPT_ATA_IO:
 2585                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
 2586                         start_ccb->ataio.resid = 0;
 2587                 /* FALLTHROUGH */
 2588         case XPT_RESET_DEV:
 2589         case XPT_ENG_EXEC:
 2590         case XPT_SMP_IO:
 2591         {
 2592                 int frozen;
 2593 
 2594                 frozen = cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 2595                 path->device->sim->devq->alloc_openings += frozen;
 2596                 if (frozen > 0)
 2597                         xpt_run_dev_allocq(path->bus);
 2598                 if (xpt_schedule_dev_sendq(path->bus, path->device))
 2599                         xpt_run_dev_sendq(path->bus);
 2600                 break;
 2601         }
 2602         case XPT_CALC_GEOMETRY:
 2603         {
 2604                 struct cam_sim *sim;
 2605 
 2606                 /* Filter out garbage */
 2607                 if (start_ccb->ccg.block_size == 0
 2608                  || start_ccb->ccg.volume_size == 0) {
 2609                         start_ccb->ccg.cylinders = 0;
 2610                         start_ccb->ccg.heads = 0;
 2611                         start_ccb->ccg.secs_per_track = 0;
 2612                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2613                         break;
 2614                 }
 2615 #if defined(PC98) || defined(__sparc64__)
 2616                 /*
 2617                  * In a PC-98 system, geometry translation depens on
 2618                  * the "real" device geometry obtained from mode page 4.
 2619                  * SCSI geometry translation is performed in the
 2620                  * initialization routine of the SCSI BIOS and the result
 2621                  * stored in host memory.  If the translation is available
 2622                  * in host memory, use it.  If not, rely on the default
 2623                  * translation the device driver performs.
 2624                  * For sparc64, we may need adjust the geometry of large
 2625                  * disks in order to fit the limitations of the 16-bit
 2626                  * fields of the VTOC8 disk label.
 2627                  */
 2628                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 2629                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2630                         break;
 2631                 }
 2632 #endif
 2633                 sim = path->bus->sim;
 2634                 (*(sim->sim_action))(sim, start_ccb);
 2635                 break;
 2636         }
 2637         case XPT_ABORT:
 2638         {
 2639                 union ccb* abort_ccb;
 2640 
 2641                 abort_ccb = start_ccb->cab.abort_ccb;
 2642                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 2643 
 2644                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 2645                                 struct cam_ccbq *ccbq;
 2646                                 struct cam_ed *device;
 2647 
 2648                                 device = abort_ccb->ccb_h.path->device;
 2649                                 ccbq = &device->ccbq;
 2650                                 device->sim->devq->alloc_openings -= 
 2651                                     cam_ccbq_remove_ccb(ccbq, abort_ccb);
 2652                                 abort_ccb->ccb_h.status =
 2653                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2654                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2655                                 xpt_done(abort_ccb);
 2656                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2657                                 break;
 2658                         }
 2659                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 2660                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 2661                                 /*
 2662                                  * We've caught this ccb en route to
 2663                                  * the SIM.  Flag it for abort and the
 2664                                  * SIM will do so just before starting
 2665                                  * real work on the CCB.
 2666                                  */
 2667                                 abort_ccb->ccb_h.status =
 2668                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2669                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2670                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2671                                 break;
 2672                         }
 2673                 }
 2674                 if (XPT_FC_IS_QUEUED(abort_ccb)
 2675                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 2676                         /*
 2677                          * It's already completed but waiting
 2678                          * for our SWI to get to it.
 2679                          */
 2680                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 2681                         break;
 2682                 }
 2683                 /*
 2684                  * If we weren't able to take care of the abort request
 2685                  * in the XPT, pass the request down to the SIM for processing.
 2686                  */
 2687         }
 2688         /* FALLTHROUGH */
 2689         case XPT_ACCEPT_TARGET_IO:
 2690         case XPT_EN_LUN:
 2691         case XPT_IMMED_NOTIFY:
 2692         case XPT_NOTIFY_ACK:
 2693         case XPT_RESET_BUS:
 2694         case XPT_IMMEDIATE_NOTIFY:
 2695         case XPT_NOTIFY_ACKNOWLEDGE:
 2696         case XPT_GET_SIM_KNOB:
 2697         case XPT_SET_SIM_KNOB:
 2698         {
 2699                 struct cam_sim *sim;
 2700 
 2701                 sim = path->bus->sim;
 2702                 (*(sim->sim_action))(sim, start_ccb);
 2703                 break;
 2704         }
 2705         case XPT_PATH_INQ:
 2706         {
 2707                 struct cam_sim *sim;
 2708 
 2709                 sim = path->bus->sim;
 2710                 (*(sim->sim_action))(sim, start_ccb);
 2711                 break;
 2712         }
 2713         case XPT_PATH_STATS:
 2714                 start_ccb->cpis.last_reset = path->bus->last_reset;
 2715                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2716                 break;
 2717         case XPT_GDEV_TYPE:
 2718         {
 2719                 struct cam_ed *dev;
 2720 
 2721                 dev = path->device;
 2722                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2723                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2724                 } else {
 2725                         struct ccb_getdev *cgd;
 2726 
 2727                         cgd = &start_ccb->cgd;
 2728                         cgd->protocol = dev->protocol;
 2729                         cgd->inq_data = dev->inq_data;
 2730                         cgd->ident_data = dev->ident_data;
 2731                         cgd->inq_flags = dev->inq_flags;
 2732                         cgd->ccb_h.status = CAM_REQ_CMP;
 2733                         cgd->serial_num_len = dev->serial_num_len;
 2734                         if ((dev->serial_num_len > 0)
 2735                          && (dev->serial_num != NULL))
 2736                                 bcopy(dev->serial_num, cgd->serial_num,
 2737                                       dev->serial_num_len);
 2738                 }
 2739                 break;
 2740         }
 2741         case XPT_GDEV_STATS:
 2742         {
 2743                 struct cam_ed *dev;
 2744 
 2745                 dev = path->device;
 2746                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2747                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2748                 } else {
 2749                         struct ccb_getdevstats *cgds;
 2750                         struct cam_eb *bus;
 2751                         struct cam_et *tar;
 2752 
 2753                         cgds = &start_ccb->cgds;
 2754                         bus = path->bus;
 2755                         tar = path->target;
 2756                         cgds->dev_openings = dev->ccbq.dev_openings;
 2757                         cgds->dev_active = dev->ccbq.dev_active;
 2758                         cgds->devq_openings = dev->ccbq.devq_openings;
 2759                         cgds->devq_queued = dev->ccbq.queue.entries;
 2760                         cgds->held = dev->ccbq.held;
 2761                         cgds->last_reset = tar->last_reset;
 2762                         cgds->maxtags = dev->maxtags;
 2763                         cgds->mintags = dev->mintags;
 2764                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 2765                                 cgds->last_reset = bus->last_reset;
 2766                         cgds->ccb_h.status = CAM_REQ_CMP;
 2767                 }
 2768                 break;
 2769         }
 2770         case XPT_GDEVLIST:
 2771         {
 2772                 struct cam_periph       *nperiph;
 2773                 struct periph_list      *periph_head;
 2774                 struct ccb_getdevlist   *cgdl;
 2775                 u_int                   i;
 2776                 struct cam_ed           *device;
 2777                 int                     found;
 2778 
 2779 
 2780                 found = 0;
 2781 
 2782                 /*
 2783                  * Don't want anyone mucking with our data.
 2784                  */
 2785                 device = path->device;
 2786                 periph_head = &device->periphs;
 2787                 cgdl = &start_ccb->cgdl;
 2788 
 2789                 /*
 2790                  * Check and see if the list has changed since the user
 2791                  * last requested a list member.  If so, tell them that the
 2792                  * list has changed, and therefore they need to start over
 2793                  * from the beginning.
 2794                  */
 2795                 if ((cgdl->index != 0) &&
 2796                     (cgdl->generation != device->generation)) {
 2797                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 2798                         break;
 2799                 }
 2800 
 2801                 /*
 2802                  * Traverse the list of peripherals and attempt to find
 2803                  * the requested peripheral.
 2804                  */
 2805                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 2806                      (nperiph != NULL) && (i <= cgdl->index);
 2807                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 2808                         if (i == cgdl->index) {
 2809                                 strncpy(cgdl->periph_name,
 2810                                         nperiph->periph_name,
 2811                                         DEV_IDLEN);
 2812                                 cgdl->unit_number = nperiph->unit_number;
 2813                                 found = 1;
 2814                         }
 2815                 }
 2816                 if (found == 0) {
 2817                         cgdl->status = CAM_GDEVLIST_ERROR;
 2818                         break;
 2819                 }
 2820 
 2821                 if (nperiph == NULL)
 2822                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 2823                 else
 2824                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 2825 
 2826                 cgdl->index++;
 2827                 cgdl->generation = device->generation;
 2828 
 2829                 cgdl->ccb_h.status = CAM_REQ_CMP;
 2830                 break;
 2831         }
 2832         case XPT_DEV_MATCH:
 2833         {
 2834                 dev_pos_type position_type;
 2835                 struct ccb_dev_match *cdm;
 2836 
 2837                 cdm = &start_ccb->cdm;
 2838 
 2839                 /*
 2840                  * There are two ways of getting at information in the EDT.
 2841                  * The first way is via the primary EDT tree.  It starts
 2842                  * with a list of busses, then a list of targets on a bus,
 2843                  * then devices/luns on a target, and then peripherals on a
 2844                  * device/lun.  The "other" way is by the peripheral driver
 2845                  * lists.  The peripheral driver lists are organized by
 2846                  * peripheral driver.  (obviously)  So it makes sense to
 2847                  * use the peripheral driver list if the user is looking
 2848                  * for something like "da1", or all "da" devices.  If the
 2849                  * user is looking for something on a particular bus/target
 2850                  * or lun, it's generally better to go through the EDT tree.
 2851                  */
 2852 
 2853                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 2854                         position_type = cdm->pos.position_type;
 2855                 else {
 2856                         u_int i;
 2857 
 2858                         position_type = CAM_DEV_POS_NONE;
 2859 
 2860                         for (i = 0; i < cdm->num_patterns; i++) {
 2861                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 2862                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 2863                                         position_type = CAM_DEV_POS_EDT;
 2864                                         break;
 2865                                 }
 2866                         }
 2867 
 2868                         if (cdm->num_patterns == 0)
 2869                                 position_type = CAM_DEV_POS_EDT;
 2870                         else if (position_type == CAM_DEV_POS_NONE)
 2871                                 position_type = CAM_DEV_POS_PDRV;
 2872                 }
 2873 
 2874                 /*
 2875                  * Note that we drop the SIM lock here, because the EDT
 2876                  * traversal code needs to do its own locking.
 2877                  */
 2878                 CAM_SIM_UNLOCK(xpt_path_sim(cdm->ccb_h.path));
 2879                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
 2880                 case CAM_DEV_POS_EDT:
 2881                         xptedtmatch(cdm);
 2882                         break;
 2883                 case CAM_DEV_POS_PDRV:
 2884                         xptperiphlistmatch(cdm);
 2885                         break;
 2886                 default:
 2887                         cdm->status = CAM_DEV_MATCH_ERROR;
 2888                         break;
 2889                 }
 2890                 CAM_SIM_LOCK(xpt_path_sim(cdm->ccb_h.path));
 2891 
 2892                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 2893                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2894                 else
 2895                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2896 
 2897                 break;
 2898         }
 2899         case XPT_SASYNC_CB:
 2900         {
 2901                 struct ccb_setasync *csa;
 2902                 struct async_node *cur_entry;
 2903                 struct async_list *async_head;
 2904                 u_int32_t added;
 2905 
 2906                 csa = &start_ccb->csa;
 2907                 added = csa->event_enable;
 2908                 async_head = &path->device->asyncs;
 2909 
 2910                 /*
 2911                  * If there is already an entry for us, simply
 2912                  * update it.
 2913                  */
 2914                 cur_entry = SLIST_FIRST(async_head);
 2915                 while (cur_entry != NULL) {
 2916                         if ((cur_entry->callback_arg == csa->callback_arg)
 2917                          && (cur_entry->callback == csa->callback))
 2918                                 break;
 2919                         cur_entry = SLIST_NEXT(cur_entry, links);
 2920                 }
 2921 
 2922                 if (cur_entry != NULL) {
 2923                         /*
 2924                          * If the request has no flags set,
 2925                          * remove the entry.
 2926                          */
 2927                         added &= ~cur_entry->event_enable;
 2928                         if (csa->event_enable == 0) {
 2929                                 SLIST_REMOVE(async_head, cur_entry,
 2930                                              async_node, links);
 2931                                 xpt_release_device(path->device);
 2932                                 free(cur_entry, M_CAMXPT);
 2933                         } else {
 2934                                 cur_entry->event_enable = csa->event_enable;
 2935                         }
 2936                         csa->event_enable = added;
 2937                 } else {
 2938                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 2939                                            M_NOWAIT);
 2940                         if (cur_entry == NULL) {
 2941                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 2942                                 break;
 2943                         }
 2944                         cur_entry->event_enable = csa->event_enable;
 2945                         cur_entry->callback_arg = csa->callback_arg;
 2946                         cur_entry->callback = csa->callback;
 2947                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 2948                         xpt_acquire_device(path->device);
 2949                 }
 2950                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2951                 break;
 2952         }
 2953         case XPT_REL_SIMQ:
 2954         {
 2955                 struct ccb_relsim *crs;
 2956                 struct cam_ed *dev;
 2957 
 2958                 crs = &start_ccb->crs;
 2959                 dev = path->device;
 2960                 if (dev == NULL) {
 2961 
 2962                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 2963                         break;
 2964                 }
 2965 
 2966                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 2967 
 2968                         /* Don't ever go below one opening */
 2969                         if (crs->openings > 0) {
 2970                                 xpt_dev_ccbq_resize(path, crs->openings);
 2971                                 if (bootverbose) {
 2972                                         xpt_print(path,
 2973                                             "number of openings is now %d\n",
 2974                                             crs->openings);
 2975                                 }
 2976                         }
 2977                 }
 2978 
 2979                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 2980 
 2981                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 2982 
 2983                                 /*
 2984                                  * Just extend the old timeout and decrement
 2985                                  * the freeze count so that a single timeout
 2986                                  * is sufficient for releasing the queue.
 2987                                  */
 2988                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2989                                 callout_stop(&dev->callout);
 2990                         } else {
 2991 
 2992                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2993                         }
 2994 
 2995                         callout_reset(&dev->callout,
 2996                             (crs->release_timeout * hz) / 1000,
 2997                             xpt_release_devq_timeout, dev);
 2998 
 2999                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 3000 
 3001                 }
 3002 
 3003                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 3004 
 3005                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 3006                                 /*
 3007                                  * Decrement the freeze count so that a single
 3008                                  * completion is still sufficient to unfreeze
 3009                                  * the queue.
 3010                                  */
 3011                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3012                         } else {
 3013 
 3014                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 3015                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3016                         }
 3017                 }
 3018 
 3019                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 3020 
 3021                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 3022                          || (dev->ccbq.dev_active == 0)) {
 3023 
 3024                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3025                         } else {
 3026 
 3027                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 3028                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3029                         }
 3030                 }
 3031 
 3032                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 3033                         xpt_release_devq_rl(path, /*runlevel*/
 3034                             (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
 3035                                 crs->release_timeout : 0,
 3036                             /*count*/1, /*run_queue*/TRUE);
 3037                 }
 3038                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt[0];
 3039                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3040                 break;
 3041         }
 3042         case XPT_DEBUG: {
 3043                 struct cam_path *oldpath;
 3044                 struct cam_sim *oldsim;
 3045 
 3046                 /* Check that all request bits are supported. */
 3047                 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
 3048                         start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 3049                         break;
 3050                 }
 3051 
 3052                 cam_dflags = CAM_DEBUG_NONE;
 3053                 if (cam_dpath != NULL) {
 3054                         /* To release the old path we must hold proper lock. */
 3055                         oldpath = cam_dpath;
 3056                         cam_dpath = NULL;
 3057                         oldsim = xpt_path_sim(oldpath);
 3058                         CAM_SIM_UNLOCK(xpt_path_sim(start_ccb->ccb_h.path));
 3059                         CAM_SIM_LOCK(oldsim);
 3060                         xpt_free_path(oldpath);
 3061                         CAM_SIM_UNLOCK(oldsim);
 3062                         CAM_SIM_LOCK(xpt_path_sim(start_ccb->ccb_h.path));
 3063                 }
 3064                 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
 3065                         if (xpt_create_path(&cam_dpath, NULL,
 3066                                             start_ccb->ccb_h.path_id,
 3067                                             start_ccb->ccb_h.target_id,
 3068                                             start_ccb->ccb_h.target_lun) !=
 3069                                             CAM_REQ_CMP) {
 3070                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3071                         } else {
 3072                                 cam_dflags = start_ccb->cdbg.flags;
 3073                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3074                                 xpt_print(cam_dpath, "debugging flags now %x\n",
 3075                                     cam_dflags);
 3076                         }
 3077                 } else
 3078                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3079                 break;
 3080         }
 3081         case XPT_FREEZE_QUEUE:
 3082         {
 3083                 struct ccb_relsim *crs = &start_ccb->crs;
 3084 
 3085                 xpt_freeze_devq_rl(path, /*runlevel*/
 3086                     (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
 3087                     crs->release_timeout : 0, /*count*/1);
 3088                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3089                 break;
 3090         }
 3091         case XPT_NOOP:
 3092                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 3093                         xpt_freeze_devq(path, 1);
 3094                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3095                 break;
 3096         default:
 3097         case XPT_SDEV_TYPE:
 3098         case XPT_TERM_IO:
 3099         case XPT_ENG_INQ:
 3100                 /* XXX Implement */
 3101                 printf("%s: CCB type %#x not supported\n", __func__,
 3102                        start_ccb->ccb_h.func_code);
 3103                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 3104                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
 3105                         xpt_done(start_ccb);
 3106                 }
 3107                 break;
 3108         }
 3109 }
 3110 
 3111 void
 3112 xpt_polled_action(union ccb *start_ccb)
 3113 {
 3114         u_int32_t timeout;
 3115         struct    cam_sim *sim;
 3116         struct    cam_devq *devq;
 3117         struct    cam_ed *dev;
 3118 
 3119 
 3120         timeout = start_ccb->ccb_h.timeout * 10;
 3121         sim = start_ccb->ccb_h.path->bus->sim;
 3122         devq = sim->devq;
 3123         dev = start_ccb->ccb_h.path->device;
 3124 
 3125         mtx_assert(sim->mtx, MA_OWNED);
 3126 
 3127         /* Don't use ISR for this SIM while polling. */
 3128         sim->flags |= CAM_SIM_POLLED;
 3129 
 3130         /*
 3131          * Steal an opening so that no other queued requests
 3132          * can get it before us while we simulate interrupts.
 3133          */
 3134         dev->ccbq.devq_openings--;
 3135         dev->ccbq.dev_openings--;
 3136 
 3137         while(((devq != NULL && devq->send_openings <= 0) ||
 3138            dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
 3139                 DELAY(100);
 3140                 (*(sim->sim_poll))(sim);
 3141                 camisr_runqueue(&sim->sim_doneq);
 3142         }
 3143 
 3144         dev->ccbq.devq_openings++;
 3145         dev->ccbq.dev_openings++;
 3146 
 3147         if (timeout != 0) {
 3148                 xpt_action(start_ccb);
 3149                 while(--timeout > 0) {
 3150                         (*(sim->sim_poll))(sim);
 3151                         camisr_runqueue(&sim->sim_doneq);
 3152                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 3153                             != CAM_REQ_INPROG)
 3154                                 break;
 3155                         DELAY(100);
 3156                 }
 3157                 if (timeout == 0) {
 3158                         /*
 3159                          * XXX Is it worth adding a sim_timeout entry
 3160                          * point so we can attempt recovery?  If
 3161                          * this is only used for dumps, I don't think
 3162                          * it is.
 3163                          */
 3164                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3165                 }
 3166         } else {
 3167                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3168         }
 3169 
 3170         /* We will use CAM ISR for this SIM again. */
 3171         sim->flags &= ~CAM_SIM_POLLED;
 3172 }
 3173 
 3174 /*
 3175  * Schedule a peripheral driver to receive a ccb when it's
 3176  * target device has space for more transactions.
 3177  */
 3178 void
 3179 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 3180 {
 3181         struct cam_ed *device;
 3182         int runq = 0;
 3183 
 3184         mtx_assert(perph->sim->mtx, MA_OWNED);
 3185 
 3186         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3187         device = perph->path->device;
 3188         if (periph_is_queued(perph)) {
 3189                 /* Simply reorder based on new priority */
 3190                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3191                           ("   change priority to %d\n", new_priority));
 3192                 if (new_priority < perph->pinfo.priority) {
 3193                         camq_change_priority(&device->drvq,
 3194                                              perph->pinfo.index,
 3195                                              new_priority);
 3196                         runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3197                 }
 3198         } else {
 3199                 /* New entry on the queue */
 3200                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3201                           ("   added periph to queue\n"));
 3202                 perph->pinfo.priority = new_priority;
 3203                 perph->pinfo.generation = ++device->drvq.generation;
 3204                 camq_insert(&device->drvq, &perph->pinfo);
 3205                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3206         }
 3207         if (runq != 0) {
 3208                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3209                           ("   calling xpt_run_devq\n"));
 3210                 xpt_run_dev_allocq(perph->path->bus);
 3211         }
 3212 }
 3213 
 3214 
 3215 /*
 3216  * Schedule a device to run on a given queue.
 3217  * If the device was inserted as a new entry on the queue,
 3218  * return 1 meaning the device queue should be run. If we
 3219  * were already queued, implying someone else has already
 3220  * started the queue, return 0 so the caller doesn't attempt
 3221  * to run the queue.
 3222  */
 3223 int
 3224 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3225                  u_int32_t new_priority)
 3226 {
 3227         int retval;
 3228         u_int32_t old_priority;
 3229 
 3230         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3231 
 3232         old_priority = pinfo->priority;
 3233 
 3234         /*
 3235          * Are we already queued?
 3236          */
 3237         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3238                 /* Simply reorder based on new priority */
 3239                 if (new_priority < old_priority) {
 3240                         camq_change_priority(queue, pinfo->index,
 3241                                              new_priority);
 3242                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3243                                         ("changed priority to %d\n",
 3244                                          new_priority));
 3245                         retval = 1;
 3246                 } else
 3247                         retval = 0;
 3248         } else {
 3249                 /* New entry on the queue */
 3250                 if (new_priority < old_priority)
 3251                         pinfo->priority = new_priority;
 3252 
 3253                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3254                                 ("Inserting onto queue\n"));
 3255                 pinfo->generation = ++queue->generation;
 3256                 camq_insert(queue, pinfo);
 3257                 retval = 1;
 3258         }
 3259         return (retval);
 3260 }
 3261 
 3262 static void
 3263 xpt_run_dev_allocq(struct cam_eb *bus)
 3264 {
 3265         struct  cam_devq *devq;
 3266 
 3267         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3268         devq = bus->sim->devq;
 3269 
 3270         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3271                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3272                          "openings == %d, active == %d\n",
 3273                          devq->alloc_queue.qfrozen_cnt[0],
 3274                          devq->alloc_queue.entries,
 3275                          devq->alloc_openings,
 3276                          devq->alloc_active));
 3277 
 3278         devq->alloc_queue.qfrozen_cnt[0]++;
 3279         while ((devq->alloc_queue.entries > 0)
 3280             && (devq->alloc_openings > 0)
 3281             && (devq->alloc_queue.qfrozen_cnt[0] <= 1)) {
 3282                 struct  cam_ed_qinfo *qinfo;
 3283                 struct  cam_ed *device;
 3284                 union   ccb *work_ccb;
 3285                 struct  cam_periph *drv;
 3286                 struct  camq *drvq;
 3287 
 3288                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3289                                                            CAMQ_HEAD);
 3290                 device = qinfo->device;
 3291                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3292                                 ("running device %p\n", device));
 3293 
 3294                 drvq = &device->drvq;
 3295                 KASSERT(drvq->entries > 0, ("xpt_run_dev_allocq: "
 3296                     "Device on queue without any work to do"));
 3297                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3298                         devq->alloc_openings--;
 3299                         devq->alloc_active++;
 3300                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3301                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3302                                       drv->pinfo.priority);
 3303                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3304                                         ("calling periph start\n"));
 3305                         drv->periph_start(drv, work_ccb);
 3306                 } else {
 3307                         /*
 3308                          * Malloc failure in alloc_ccb
 3309                          */
 3310                         /*
 3311                          * XXX add us to a list to be run from free_ccb
 3312                          * if we don't have any ccbs active on this
 3313                          * device queue otherwise we may never get run
 3314                          * again.
 3315                          */
 3316                         break;
 3317                 }
 3318 
 3319                 /* We may have more work. Attempt to reschedule. */
 3320                 xpt_schedule_dev_allocq(bus, device);
 3321         }
 3322         devq->alloc_queue.qfrozen_cnt[0]--;
 3323 }
 3324 
 3325 static void
 3326 xpt_run_dev_sendq(struct cam_eb *bus)
 3327 {
 3328         struct  cam_devq *devq;
 3329         char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 3330 
 3331         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3332 
 3333         devq = bus->sim->devq;
 3334 
 3335         devq->send_queue.qfrozen_cnt[0]++;
 3336         while ((devq->send_queue.entries > 0)
 3337             && (devq->send_openings > 0)
 3338             && (devq->send_queue.qfrozen_cnt[0] <= 1)) {
 3339                 struct  cam_ed_qinfo *qinfo;
 3340                 struct  cam_ed *device;
 3341                 union ccb *work_ccb;
 3342                 struct  cam_sim *sim;
 3343 
 3344                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3345                                                            CAMQ_HEAD);
 3346                 device = qinfo->device;
 3347                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3348                                 ("running device %p\n", device));
 3349 
 3350                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3351                 if (work_ccb == NULL) {
 3352                         printf("device on run queue with no ccbs???\n");
 3353                         continue;
 3354                 }
 3355 
 3356                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3357 
 3358                         mtx_lock(&xsoftc.xpt_lock);
 3359                         if (xsoftc.num_highpower <= 0) {
 3360                                 /*
 3361                                  * We got a high power command, but we
 3362                                  * don't have any available slots.  Freeze
 3363                                  * the device queue until we have a slot
 3364                                  * available.
 3365                                  */
 3366                                 xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3367                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
 3368                                                    &work_ccb->ccb_h,
 3369                                                    xpt_links.stqe);
 3370 
 3371                                 mtx_unlock(&xsoftc.xpt_lock);
 3372                                 continue;
 3373                         } else {
 3374                                 /*
 3375                                  * Consume a high power slot while
 3376                                  * this ccb runs.
 3377                                  */
 3378                                 xsoftc.num_highpower--;
 3379                         }
 3380                         mtx_unlock(&xsoftc.xpt_lock);
 3381                 }
 3382                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3383                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3384 
 3385                 devq->send_openings--;
 3386                 devq->send_active++;
 3387 
 3388                 xpt_schedule_dev_sendq(bus, device);
 3389 
 3390                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
 3391                         /*
 3392                          * The client wants to freeze the queue
 3393                          * after this CCB is sent.
 3394                          */
 3395                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3396                 }
 3397 
 3398                 /* In Target mode, the peripheral driver knows best... */
 3399                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3400                         if ((device->inq_flags & SID_CmdQue) != 0
 3401                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3402                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3403                         else
 3404                                 /*
 3405                                  * Clear this in case of a retried CCB that
 3406                                  * failed due to a rejected tag.
 3407                                  */
 3408                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3409                 }
 3410 
 3411                 switch (work_ccb->ccb_h.func_code) {
 3412                 case XPT_SCSI_IO:
 3413                         CAM_DEBUG(work_ccb->ccb_h.path,
 3414                             CAM_DEBUG_CDB,("%s. CDB: %s\n",
 3415                              scsi_op_desc(work_ccb->csio.cdb_io.cdb_bytes[0],
 3416                                           &device->inq_data),
 3417                              scsi_cdb_string(work_ccb->csio.cdb_io.cdb_bytes,
 3418                                              cdb_str, sizeof(cdb_str))));
 3419                         break;
 3420                 case XPT_ATA_IO:
 3421                         CAM_DEBUG(work_ccb->ccb_h.path,
 3422                             CAM_DEBUG_CDB,("%s. ACB: %s\n",
 3423                              ata_op_string(&work_ccb->ataio.cmd),
 3424                              ata_cmd_string(&work_ccb->ataio.cmd,
 3425                                             cdb_str, sizeof(cdb_str))));
 3426                         break;
 3427                 default:
 3428                         break;
 3429                 }
 3430 
 3431                 /*
 3432                  * Device queues can be shared among multiple sim instances
 3433                  * that reside on different busses.  Use the SIM in the queue
 3434                  * CCB's path, rather than the one in the bus that was passed
 3435                  * into this function.
 3436                  */
 3437                 sim = work_ccb->ccb_h.path->bus->sim;
 3438                 (*(sim->sim_action))(sim, work_ccb);
 3439         }
 3440         devq->send_queue.qfrozen_cnt[0]--;
 3441 }
 3442 
 3443 /*
 3444  * This function merges stuff from the slave ccb into the master ccb, while
 3445  * keeping important fields in the master ccb constant.
 3446  */
 3447 void
 3448 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3449 {
 3450 
 3451         /*
 3452          * Pull fields that are valid for peripheral drivers to set
 3453          * into the master CCB along with the CCB "payload".
 3454          */
 3455         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3456         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3457         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3458         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 3459         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3460               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3461 }
 3462 
 3463 void
 3464 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3465 {
 3466 
 3467         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3468         ccb_h->pinfo.priority = priority;
 3469         ccb_h->path = path;
 3470         ccb_h->path_id = path->bus->path_id;
 3471         if (path->target)
 3472                 ccb_h->target_id = path->target->target_id;
 3473         else
 3474                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3475         if (path->device) {
 3476                 ccb_h->target_lun = path->device->lun_id;
 3477                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3478         } else {
 3479                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3480         }
 3481         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3482         ccb_h->flags = 0;
 3483 }
 3484 
 3485 /* Path manipulation functions */
 3486 cam_status
 3487 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3488                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3489 {
 3490         struct     cam_path *path;
 3491         cam_status status;
 3492 
 3493         path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
 3494 
 3495         if (path == NULL) {
 3496                 status = CAM_RESRC_UNAVAIL;
 3497                 return(status);
 3498         }
 3499         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3500         if (status != CAM_REQ_CMP) {
 3501                 free(path, M_CAMPATH);
 3502                 path = NULL;
 3503         }
 3504         *new_path_ptr = path;
 3505         return (status);
 3506 }
 3507 
 3508 cam_status
 3509 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
 3510                          struct cam_periph *periph, path_id_t path_id,
 3511                          target_id_t target_id, lun_id_t lun_id)
 3512 {
 3513         struct     cam_path *path;
 3514         struct     cam_eb *bus = NULL;
 3515         cam_status status;
 3516 
 3517         path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_WAITOK);
 3518 
 3519         bus = xpt_find_bus(path_id);
 3520         if (bus != NULL)
 3521                 CAM_SIM_LOCK(bus->sim);
 3522         status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
 3523         if (bus != NULL) {
 3524                 CAM_SIM_UNLOCK(bus->sim);
 3525                 xpt_release_bus(bus);
 3526         }
 3527         if (status != CAM_REQ_CMP) {
 3528                 free(path, M_CAMPATH);
 3529                 path = NULL;
 3530         }
 3531         *new_path_ptr = path;
 3532         return (status);
 3533 }
 3534 
 3535 cam_status
 3536 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 3537                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3538 {
 3539         struct       cam_eb *bus;
 3540         struct       cam_et *target;
 3541         struct       cam_ed *device;
 3542         cam_status   status;
 3543 
 3544         status = CAM_REQ_CMP;   /* Completed without error */
 3545         target = NULL;          /* Wildcarded */
 3546         device = NULL;          /* Wildcarded */
 3547 
 3548         /*
 3549          * We will potentially modify the EDT, so block interrupts
 3550          * that may attempt to create cam paths.
 3551          */
 3552         bus = xpt_find_bus(path_id);
 3553         if (bus == NULL) {
 3554                 status = CAM_PATH_INVALID;
 3555         } else {
 3556                 target = xpt_find_target(bus, target_id);
 3557                 if (target == NULL) {
 3558                         /* Create one */
 3559                         struct cam_et *new_target;
 3560 
 3561                         new_target = xpt_alloc_target(bus, target_id);
 3562                         if (new_target == NULL) {
 3563                                 status = CAM_RESRC_UNAVAIL;
 3564                         } else {
 3565                                 target = new_target;
 3566                         }
 3567                 }
 3568                 if (target != NULL) {
 3569                         device = xpt_find_device(target, lun_id);
 3570                         if (device == NULL) {
 3571                                 /* Create one */
 3572                                 struct cam_ed *new_device;
 3573 
 3574                                 new_device =
 3575                                     (*(bus->xport->alloc_device))(bus,
 3576                                                                       target,
 3577                                                                       lun_id);
 3578                                 if (new_device == NULL) {
 3579                                         status = CAM_RESRC_UNAVAIL;
 3580                                 } else {
 3581                                         device = new_device;
 3582                                 }
 3583                         }
 3584                 }
 3585         }
 3586 
 3587         /*
 3588          * Only touch the user's data if we are successful.
 3589          */
 3590         if (status == CAM_REQ_CMP) {
 3591                 new_path->periph = perph;
 3592                 new_path->bus = bus;
 3593                 new_path->target = target;
 3594                 new_path->device = device;
 3595                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 3596         } else {
 3597                 if (device != NULL)
 3598                         xpt_release_device(device);
 3599                 if (target != NULL)
 3600                         xpt_release_target(target);
 3601                 if (bus != NULL)
 3602                         xpt_release_bus(bus);
 3603         }
 3604         return (status);
 3605 }
 3606 
 3607 void
 3608 xpt_release_path(struct cam_path *path)
 3609 {
 3610         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 3611         if (path->device != NULL) {
 3612                 xpt_release_device(path->device);
 3613                 path->device = NULL;
 3614         }
 3615         if (path->target != NULL) {
 3616                 xpt_release_target(path->target);
 3617                 path->target = NULL;
 3618         }
 3619         if (path->bus != NULL) {
 3620                 xpt_release_bus(path->bus);
 3621                 path->bus = NULL;
 3622         }
 3623 }
 3624 
 3625 void
 3626 xpt_free_path(struct cam_path *path)
 3627 {
 3628 
 3629         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 3630         xpt_release_path(path);
 3631         free(path, M_CAMPATH);
 3632 }
 3633 
 3634 void
 3635 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
 3636     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
 3637 {
 3638 
 3639         xpt_lock_buses();
 3640         if (bus_ref) {
 3641                 if (path->bus)
 3642                         *bus_ref = path->bus->refcount;
 3643                 else
 3644                         *bus_ref = 0;
 3645         }
 3646         if (periph_ref) {
 3647                 if (path->periph)
 3648                         *periph_ref = path->periph->refcount;
 3649                 else
 3650                         *periph_ref = 0;
 3651         }
 3652         xpt_unlock_buses();
 3653         if (target_ref) {
 3654                 if (path->target)
 3655                         *target_ref = path->target->refcount;
 3656                 else
 3657                         *target_ref = 0;
 3658         }
 3659         if (device_ref) {
 3660                 if (path->device)
 3661                         *device_ref = path->device->refcount;
 3662                 else
 3663                         *device_ref = 0;
 3664         }
 3665 }
 3666 
 3667 /*
 3668  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 3669  * in path1, 2 for match with wildcards in path2.
 3670  */
 3671 int
 3672 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 3673 {
 3674         int retval = 0;
 3675 
 3676         if (path1->bus != path2->bus) {
 3677                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 3678                         retval = 1;
 3679                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 3680                         retval = 2;
 3681                 else
 3682                         return (-1);
 3683         }
 3684         if (path1->target != path2->target) {
 3685                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 3686                         if (retval == 0)
 3687                                 retval = 1;
 3688                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 3689                         retval = 2;
 3690                 else
 3691                         return (-1);
 3692         }
 3693         if (path1->device != path2->device) {
 3694                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 3695                         if (retval == 0)
 3696                                 retval = 1;
 3697                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 3698                         retval = 2;
 3699                 else
 3700                         return (-1);
 3701         }
 3702         return (retval);
 3703 }
 3704 
 3705 void
 3706 xpt_print_path(struct cam_path *path)
 3707 {
 3708 
 3709         if (path == NULL)
 3710                 printf("(nopath): ");
 3711         else {
 3712                 if (path->periph != NULL)
 3713                         printf("(%s%d:", path->periph->periph_name,
 3714                                path->periph->unit_number);
 3715                 else
 3716                         printf("(noperiph:");
 3717 
 3718                 if (path->bus != NULL)
 3719                         printf("%s%d:%d:", path->bus->sim->sim_name,
 3720                                path->bus->sim->unit_number,
 3721                                path->bus->sim->bus_id);
 3722                 else
 3723                         printf("nobus:");
 3724 
 3725                 if (path->target != NULL)
 3726                         printf("%d:", path->target->target_id);
 3727                 else
 3728                         printf("X:");
 3729 
 3730                 if (path->device != NULL)
 3731                         printf("%d): ", path->device->lun_id);
 3732                 else
 3733                         printf("X): ");
 3734         }
 3735 }
 3736 
 3737 void
 3738 xpt_print(struct cam_path *path, const char *fmt, ...)
 3739 {
 3740         va_list ap;
 3741         xpt_print_path(path);
 3742         va_start(ap, fmt);
 3743         vprintf(fmt, ap);
 3744         va_end(ap);
 3745 }
 3746 
 3747 int
 3748 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 3749 {
 3750         struct sbuf sb;
 3751 
 3752 #ifdef INVARIANTS
 3753         if (path != NULL && path->bus != NULL)
 3754                 mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3755 #endif
 3756 
 3757         sbuf_new(&sb, str, str_len, 0);
 3758 
 3759         if (path == NULL)
 3760                 sbuf_printf(&sb, "(nopath): ");
 3761         else {
 3762                 if (path->periph != NULL)
 3763                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 3764                                     path->periph->unit_number);
 3765                 else
 3766                         sbuf_printf(&sb, "(noperiph:");
 3767 
 3768                 if (path->bus != NULL)
 3769                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 3770                                     path->bus->sim->unit_number,
 3771                                     path->bus->sim->bus_id);
 3772                 else
 3773                         sbuf_printf(&sb, "nobus:");
 3774 
 3775                 if (path->target != NULL)
 3776                         sbuf_printf(&sb, "%d:", path->target->target_id);
 3777                 else
 3778                         sbuf_printf(&sb, "X:");
 3779 
 3780                 if (path->device != NULL)
 3781                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
 3782                 else
 3783                         sbuf_printf(&sb, "X): ");
 3784         }
 3785         sbuf_finish(&sb);
 3786 
 3787         return(sbuf_len(&sb));
 3788 }
 3789 
 3790 path_id_t
 3791 xpt_path_path_id(struct cam_path *path)
 3792 {
 3793         return(path->bus->path_id);
 3794 }
 3795 
 3796 target_id_t
 3797 xpt_path_target_id(struct cam_path *path)
 3798 {
 3799         if (path->target != NULL)
 3800                 return (path->target->target_id);
 3801         else
 3802                 return (CAM_TARGET_WILDCARD);
 3803 }
 3804 
 3805 lun_id_t
 3806 xpt_path_lun_id(struct cam_path *path)
 3807 {
 3808         if (path->device != NULL)
 3809                 return (path->device->lun_id);
 3810         else
 3811                 return (CAM_LUN_WILDCARD);
 3812 }
 3813 
 3814 struct cam_sim *
 3815 xpt_path_sim(struct cam_path *path)
 3816 {
 3817 
 3818         return (path->bus->sim);
 3819 }
 3820 
 3821 struct cam_periph*
 3822 xpt_path_periph(struct cam_path *path)
 3823 {
 3824         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3825 
 3826         return (path->periph);
 3827 }
 3828 
 3829 int
 3830 xpt_path_legacy_ata_id(struct cam_path *path)
 3831 {
 3832         struct cam_eb *bus;
 3833         int bus_id;
 3834 
 3835         if ((strcmp(path->bus->sim->sim_name, "ata") != 0) &&
 3836             strcmp(path->bus->sim->sim_name, "ahcich") != 0 &&
 3837             strcmp(path->bus->sim->sim_name, "mvsch") != 0 &&
 3838             strcmp(path->bus->sim->sim_name, "siisch") != 0)
 3839                 return (-1);
 3840 
 3841         if (strcmp(path->bus->sim->sim_name, "ata") == 0 &&
 3842             path->bus->sim->unit_number < 2) {
 3843                 bus_id = path->bus->sim->unit_number;
 3844         } else {
 3845                 bus_id = 2;
 3846                 xpt_lock_buses();
 3847                 TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
 3848                         if (bus == path->bus)
 3849                                 break;
 3850                         if ((strcmp(bus->sim->sim_name, "ata") == 0 &&
 3851                              bus->sim->unit_number >= 2) ||
 3852                             strcmp(bus->sim->sim_name, "ahcich") == 0 ||
 3853                             strcmp(bus->sim->sim_name, "mvsch") == 0 ||
 3854                             strcmp(bus->sim->sim_name, "siisch") == 0)
 3855                                 bus_id++;
 3856                 }
 3857                 xpt_unlock_buses();
 3858         }
 3859         if (path->target != NULL) {
 3860                 if (path->target->target_id < 2)
 3861                         return (bus_id * 2 + path->target->target_id);
 3862                 else
 3863                         return (-1);
 3864         } else
 3865                 return (bus_id * 2);
 3866 }
 3867 
 3868 /*
 3869  * Release a CAM control block for the caller.  Remit the cost of the structure
 3870  * to the device referenced by the path.  If the this device had no 'credits'
 3871  * and peripheral drivers have registered async callbacks for this notification
 3872  * call them now.
 3873  */
 3874 void
 3875 xpt_release_ccb(union ccb *free_ccb)
 3876 {
 3877         struct   cam_path *path;
 3878         struct   cam_ed *device;
 3879         struct   cam_eb *bus;
 3880         struct   cam_sim *sim;
 3881 
 3882         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 3883         path = free_ccb->ccb_h.path;
 3884         device = path->device;
 3885         bus = path->bus;
 3886         sim = bus->sim;
 3887 
 3888         mtx_assert(sim->mtx, MA_OWNED);
 3889 
 3890         cam_ccbq_release_opening(&device->ccbq);
 3891         if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) {
 3892                 device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
 3893                 cam_ccbq_resize(&device->ccbq,
 3894                     device->ccbq.dev_openings + device->ccbq.dev_active);
 3895         }
 3896         if (sim->ccb_count > sim->max_ccbs) {
 3897                 xpt_free_ccb(free_ccb);
 3898                 sim->ccb_count--;
 3899         } else {
 3900                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
 3901                     xpt_links.sle);
 3902         }
 3903         if (sim->devq == NULL) {
 3904                 return;
 3905         }
 3906         sim->devq->alloc_openings++;
 3907         sim->devq->alloc_active--;
 3908         if (device_is_alloc_queued(device) == 0)
 3909                 xpt_schedule_dev_allocq(bus, device);
 3910         xpt_run_dev_allocq(bus);
 3911 }
 3912 
 3913 /* Functions accessed by SIM drivers */
 3914 
 3915 static struct xpt_xport xport_default = {
 3916         .alloc_device = xpt_alloc_device_default,
 3917         .action = xpt_action_default,
 3918         .async = xpt_dev_async_default,
 3919 };
 3920 
 3921 /*
 3922  * A sim structure, listing the SIM entry points and instance
 3923  * identification info is passed to xpt_bus_register to hook the SIM
 3924  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 3925  * for this new bus and places it in the array of busses and assigns
 3926  * it a path_id.  The path_id may be influenced by "hard wiring"
 3927  * information specified by the user.  Once interrupt services are
 3928  * available, the bus will be probed.
 3929  */
 3930 int32_t
 3931 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
 3932 {
 3933         struct cam_eb *new_bus;
 3934         struct cam_eb *old_bus;
 3935         struct ccb_pathinq cpi;
 3936         struct cam_path *path;
 3937         cam_status status;
 3938 
 3939         mtx_assert(sim->mtx, MA_OWNED);
 3940 
 3941         sim->bus_id = bus;
 3942         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 3943                                           M_CAMXPT, M_NOWAIT);
 3944         if (new_bus == NULL) {
 3945                 /* Couldn't satisfy request */
 3946                 return (CAM_RESRC_UNAVAIL);
 3947         }
 3948         if (strcmp(sim->sim_name, "xpt") != 0) {
 3949                 sim->path_id =
 3950                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 3951         }
 3952 
 3953         TAILQ_INIT(&new_bus->et_entries);
 3954         new_bus->path_id = sim->path_id;
 3955         cam_sim_hold(sim);
 3956         new_bus->sim = sim;
 3957         timevalclear(&new_bus->last_reset);
 3958         new_bus->flags = 0;
 3959         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 3960         new_bus->generation = 0;
 3961 
 3962         xpt_lock_buses();
 3963         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3964         while (old_bus != NULL
 3965             && old_bus->path_id < new_bus->path_id)
 3966                 old_bus = TAILQ_NEXT(old_bus, links);
 3967         if (old_bus != NULL)
 3968                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 3969         else
 3970                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
 3971         xsoftc.bus_generation++;
 3972         xpt_unlock_buses();
 3973 
 3974         /*
 3975          * Set a default transport so that a PATH_INQ can be issued to
 3976          * the SIM.  This will then allow for probing and attaching of
 3977          * a more appropriate transport.
 3978          */
 3979         new_bus->xport = &xport_default;
 3980 
 3981         status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
 3982                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3983         if (status != CAM_REQ_CMP) {
 3984                 xpt_release_bus(new_bus);
 3985                 free(path, M_CAMXPT);
 3986                 return (CAM_RESRC_UNAVAIL);
 3987         }
 3988 
 3989         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 3990         cpi.ccb_h.func_code = XPT_PATH_INQ;
 3991         xpt_action((union ccb *)&cpi);
 3992 
 3993         if (cpi.ccb_h.status == CAM_REQ_CMP) {
 3994                 switch (cpi.transport) {
 3995                 case XPORT_SPI:
 3996                 case XPORT_SAS:
 3997                 case XPORT_FC:
 3998                 case XPORT_USB:
 3999                 case XPORT_ISCSI:
 4000                 case XPORT_PPB:
 4001                         new_bus->xport = scsi_get_xport();
 4002                         break;
 4003                 case XPORT_ATA:
 4004                 case XPORT_SATA:
 4005                         new_bus->xport = ata_get_xport();
 4006                         break;
 4007                 default:
 4008                         new_bus->xport = &xport_default;
 4009                         break;
 4010                 }
 4011         }
 4012 
 4013         /* Notify interested parties */
 4014         if (sim->path_id != CAM_XPT_PATH_ID) {
 4015                 union   ccb *scan_ccb;
 4016 
 4017                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
 4018                 /* Initiate bus rescan. */
 4019                 scan_ccb = xpt_alloc_ccb_nowait();
 4020                 scan_ccb->ccb_h.path = path;
 4021                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
 4022                 scan_ccb->crcn.flags = 0;
 4023                 xpt_rescan(scan_ccb);
 4024         } else
 4025                 xpt_free_path(path);
 4026         return (CAM_SUCCESS);
 4027 }
 4028 
 4029 int32_t
 4030 xpt_bus_deregister(path_id_t pathid)
 4031 {
 4032         struct cam_path bus_path;
 4033         cam_status status;
 4034 
 4035         status = xpt_compile_path(&bus_path, NULL, pathid,
 4036                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4037         if (status != CAM_REQ_CMP)
 4038                 return (status);
 4039 
 4040         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 4041         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 4042 
 4043         /* Release the reference count held while registered. */
 4044         xpt_release_bus(bus_path.bus);
 4045         xpt_release_path(&bus_path);
 4046 
 4047         return (CAM_REQ_CMP);
 4048 }
 4049 
 4050 static path_id_t
 4051 xptnextfreepathid(void)
 4052 {
 4053         struct cam_eb *bus;
 4054         path_id_t pathid;
 4055         const char *strval;
 4056 
 4057         pathid = 0;
 4058         xpt_lock_buses();
 4059         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4060 retry:
 4061         /* Find an unoccupied pathid */
 4062         while (bus != NULL && bus->path_id <= pathid) {
 4063                 if (bus->path_id == pathid)
 4064                         pathid++;
 4065                 bus = TAILQ_NEXT(bus, links);
 4066         }
 4067         xpt_unlock_buses();
 4068 
 4069         /*
 4070          * Ensure that this pathid is not reserved for
 4071          * a bus that may be registered in the future.
 4072          */
 4073         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 4074                 ++pathid;
 4075                 /* Start the search over */
 4076                 xpt_lock_buses();
 4077                 goto retry;
 4078         }
 4079         return (pathid);
 4080 }
 4081 
 4082 static path_id_t
 4083 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 4084 {
 4085         path_id_t pathid;
 4086         int i, dunit, val;
 4087         char buf[32];
 4088         const char *dname;
 4089 
 4090         pathid = CAM_XPT_PATH_ID;
 4091         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 4092         i = 0;
 4093         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 4094                 if (strcmp(dname, "scbus")) {
 4095                         /* Avoid a bit of foot shooting. */
 4096                         continue;
 4097                 }
 4098                 if (dunit < 0)          /* unwired?! */
 4099                         continue;
 4100                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 4101                         if (sim_bus == val) {
 4102                                 pathid = dunit;
 4103                                 break;
 4104                         }
 4105                 } else if (sim_bus == 0) {
 4106                         /* Unspecified matches bus 0 */
 4107                         pathid = dunit;
 4108                         break;
 4109                 } else {
 4110                         printf("Ambiguous scbus configuration for %s%d "
 4111                                "bus %d, cannot wire down.  The kernel "
 4112                                "config entry for scbus%d should "
 4113                                "specify a controller bus.\n"
 4114                                "Scbus will be assigned dynamically.\n",
 4115                                sim_name, sim_unit, sim_bus, dunit);
 4116                         break;
 4117                 }
 4118         }
 4119 
 4120         if (pathid == CAM_XPT_PATH_ID)
 4121                 pathid = xptnextfreepathid();
 4122         return (pathid);
 4123 }
 4124 
 4125 static const char *
 4126 xpt_async_string(u_int32_t async_code)
 4127 {
 4128 
 4129         switch (async_code) {
 4130         case AC_BUS_RESET: return ("AC_BUS_RESET");
 4131         case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
 4132         case AC_SCSI_AEN: return ("AC_SCSI_AEN");
 4133         case AC_SENT_BDR: return ("AC_SENT_BDR");
 4134         case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
 4135         case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
 4136         case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
 4137         case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
 4138         case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
 4139         case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
 4140         case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
 4141         case AC_CONTRACT: return ("AC_CONTRACT");
 4142         case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
 4143         case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
 4144         }
 4145         return ("AC_UNKNOWN");
 4146 }
 4147 
 4148 void
 4149 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 4150 {
 4151         struct cam_eb *bus;
 4152         struct cam_et *target, *next_target;
 4153         struct cam_ed *device, *next_device;
 4154 
 4155         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4156         CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
 4157             ("xpt_async(%s)\n", xpt_async_string(async_code)));
 4158 
 4159         /*
 4160          * Most async events come from a CAM interrupt context.  In
 4161          * a few cases, the error recovery code at the peripheral layer,
 4162          * which may run from our SWI or a process context, may signal
 4163          * deferred events with a call to xpt_async.
 4164          */
 4165 
 4166         bus = path->bus;
 4167 
 4168         if (async_code == AC_BUS_RESET) {
 4169                 /* Update our notion of when the last reset occurred */
 4170                 microtime(&bus->last_reset);
 4171         }
 4172 
 4173         for (target = TAILQ_FIRST(&bus->et_entries);
 4174              target != NULL;
 4175              target = next_target) {
 4176 
 4177                 next_target = TAILQ_NEXT(target, links);
 4178 
 4179                 if (path->target != target
 4180                  && path->target->target_id != CAM_TARGET_WILDCARD
 4181                  && target->target_id != CAM_TARGET_WILDCARD)
 4182                         continue;
 4183 
 4184                 if (async_code == AC_SENT_BDR) {
 4185                         /* Update our notion of when the last reset occurred */
 4186                         microtime(&path->target->last_reset);
 4187                 }
 4188 
 4189                 for (device = TAILQ_FIRST(&target->ed_entries);
 4190                      device != NULL;
 4191                      device = next_device) {
 4192 
 4193                         next_device = TAILQ_NEXT(device, links);
 4194 
 4195                         if (path->device != device
 4196                          && path->device->lun_id != CAM_LUN_WILDCARD
 4197                          && device->lun_id != CAM_LUN_WILDCARD)
 4198                                 continue;
 4199                         /*
 4200                          * The async callback could free the device.
 4201                          * If it is a broadcast async, it doesn't hold
 4202                          * device reference, so take our own reference.
 4203                          */
 4204                         xpt_acquire_device(device);
 4205                         (*(bus->xport->async))(async_code, bus,
 4206                                                target, device,
 4207                                                async_arg);
 4208 
 4209                         xpt_async_bcast(&device->asyncs, async_code,
 4210                                         path, async_arg);
 4211                         xpt_release_device(device);
 4212                 }
 4213         }
 4214 
 4215         /*
 4216          * If this wasn't a fully wildcarded async, tell all
 4217          * clients that want all async events.
 4218          */
 4219         if (bus != xpt_periph->path->bus)
 4220                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 4221                                 path, async_arg);
 4222 }
 4223 
 4224 static void
 4225 xpt_async_bcast(struct async_list *async_head,
 4226                 u_int32_t async_code,
 4227                 struct cam_path *path, void *async_arg)
 4228 {
 4229         struct async_node *cur_entry;
 4230 
 4231         cur_entry = SLIST_FIRST(async_head);
 4232         while (cur_entry != NULL) {
 4233                 struct async_node *next_entry;
 4234                 /*
 4235                  * Grab the next list entry before we call the current
 4236                  * entry's callback.  This is because the callback function
 4237                  * can delete its async callback entry.
 4238                  */
 4239                 next_entry = SLIST_NEXT(cur_entry, links);
 4240                 if ((cur_entry->event_enable & async_code) != 0)
 4241                         cur_entry->callback(cur_entry->callback_arg,
 4242                                             async_code, path,
 4243                                             async_arg);
 4244                 cur_entry = next_entry;
 4245         }
 4246 }
 4247 
 4248 static void
 4249 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
 4250                       struct cam_et *target, struct cam_ed *device,
 4251                       void *async_arg)
 4252 {
 4253         printf("%s called\n", __func__);
 4254 }
 4255 
 4256 u_int32_t
 4257 xpt_freeze_devq_rl(struct cam_path *path, cam_rl rl, u_int count)
 4258 {
 4259         struct cam_ed *dev = path->device;
 4260 
 4261         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4262         dev->sim->devq->alloc_openings +=
 4263             cam_ccbq_freeze(&dev->ccbq, rl, count);
 4264         /* Remove frozen device from allocq. */
 4265         if (device_is_alloc_queued(dev) &&
 4266             cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
 4267              CAMQ_GET_PRIO(&dev->drvq)))) {
 4268                 camq_remove(&dev->sim->devq->alloc_queue,
 4269                     dev->alloc_ccb_entry.pinfo.index);
 4270         }
 4271         /* Remove frozen device from sendq. */
 4272         if (device_is_send_queued(dev) &&
 4273             cam_ccbq_frozen_top(&dev->ccbq)) {
 4274                 camq_remove(&dev->sim->devq->send_queue,
 4275                     dev->send_ccb_entry.pinfo.index);
 4276         }
 4277         return (dev->ccbq.queue.qfrozen_cnt[rl]);
 4278 }
 4279 
 4280 u_int32_t
 4281 xpt_freeze_devq(struct cam_path *path, u_int count)
 4282 {
 4283 
 4284         return (xpt_freeze_devq_rl(path, 0, count));
 4285 }
 4286 
 4287 u_int32_t
 4288 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4289 {
 4290 
 4291         mtx_assert(sim->mtx, MA_OWNED);
 4292         sim->devq->send_queue.qfrozen_cnt[0] += count;
 4293         return (sim->devq->send_queue.qfrozen_cnt[0]);
 4294 }
 4295 
 4296 static void
 4297 xpt_release_devq_timeout(void *arg)
 4298 {
 4299         struct cam_ed *device;
 4300 
 4301         device = (struct cam_ed *)arg;
 4302 
 4303         xpt_release_devq_device(device, /*rl*/0, /*count*/1, /*run_queue*/TRUE);
 4304 }
 4305 
 4306 void
 4307 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4308 {
 4309         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4310 
 4311         xpt_release_devq_device(path->device, /*rl*/0, count, run_queue);
 4312 }
 4313 
 4314 void
 4315 xpt_release_devq_rl(struct cam_path *path, cam_rl rl, u_int count, int run_queue)
 4316 {
 4317         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4318 
 4319         xpt_release_devq_device(path->device, rl, count, run_queue);
 4320 }
 4321 
 4322 static void
 4323 xpt_release_devq_device(struct cam_ed *dev, cam_rl rl, u_int count, int run_queue)
 4324 {
 4325 
 4326         if (count > dev->ccbq.queue.qfrozen_cnt[rl]) {
 4327 #ifdef INVARIANTS
 4328                 printf("xpt_release_devq(%d): requested %u > present %u\n",
 4329                     rl, count, dev->ccbq.queue.qfrozen_cnt[rl]);
 4330 #endif
 4331                 count = dev->ccbq.queue.qfrozen_cnt[rl];
 4332         }
 4333         dev->sim->devq->alloc_openings -=
 4334             cam_ccbq_release(&dev->ccbq, rl, count);
 4335         if (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
 4336             CAMQ_GET_PRIO(&dev->drvq))) == 0) {
 4337                 if (xpt_schedule_dev_allocq(dev->target->bus, dev))
 4338                         xpt_run_dev_allocq(dev->target->bus);
 4339         }
 4340         if (cam_ccbq_frozen_top(&dev->ccbq) == 0) {
 4341                 /*
 4342                  * No longer need to wait for a successful
 4343                  * command completion.
 4344                  */
 4345                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4346                 /*
 4347                  * Remove any timeouts that might be scheduled
 4348                  * to release this queue.
 4349                  */
 4350                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4351                         callout_stop(&dev->callout);
 4352                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4353                 }
 4354                 if (run_queue == 0)
 4355                         return;
 4356                 /*
 4357                  * Now that we are unfrozen schedule the
 4358                  * device so any pending transactions are
 4359                  * run.
 4360                  */
 4361                 if (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4362                         xpt_run_dev_sendq(dev->target->bus);
 4363         }
 4364 }
 4365 
 4366 void
 4367 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4368 {
 4369         struct  camq *sendq;
 4370 
 4371         mtx_assert(sim->mtx, MA_OWNED);
 4372         sendq = &(sim->devq->send_queue);
 4373         if (sendq->qfrozen_cnt[0] <= 0) {
 4374 #ifdef INVARIANTS
 4375                 printf("xpt_release_simq: requested 1 > present %u\n",
 4376                     sendq->qfrozen_cnt[0]);
 4377 #endif
 4378         } else
 4379                 sendq->qfrozen_cnt[0]--;
 4380         if (sendq->qfrozen_cnt[0] == 0) {
 4381                 /*
 4382                  * If there is a timeout scheduled to release this
 4383                  * sim queue, remove it.  The queue frozen count is
 4384                  * already at 0.
 4385                  */
 4386                 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4387                         callout_stop(&sim->callout);
 4388                         sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4389                 }
 4390                 if (run_queue) {
 4391                         struct cam_eb *bus;
 4392 
 4393                         /*
 4394                          * Now that we are unfrozen run the send queue.
 4395                          */
 4396                         bus = xpt_find_bus(sim->path_id);
 4397                         xpt_run_dev_sendq(bus);
 4398                         xpt_release_bus(bus);
 4399                 }
 4400         }
 4401 }
 4402 
 4403 /*
 4404  * XXX Appears to be unused.
 4405  */
 4406 static void
 4407 xpt_release_simq_timeout(void *arg)
 4408 {
 4409         struct cam_sim *sim;
 4410 
 4411         sim = (struct cam_sim *)arg;
 4412         xpt_release_simq(sim, /* run_queue */ TRUE);
 4413 }
 4414 
 4415 void
 4416 xpt_done(union ccb *done_ccb)
 4417 {
 4418         struct cam_sim *sim;
 4419         int     first;
 4420 
 4421         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4422         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4423                 /*
 4424                  * Queue up the request for handling by our SWI handler
 4425                  * any of the "non-immediate" type of ccbs.
 4426                  */
 4427                 sim = done_ccb->ccb_h.path->bus->sim;
 4428                 TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
 4429                     sim_links.tqe);
 4430                 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4431                 if ((sim->flags & (CAM_SIM_ON_DONEQ | CAM_SIM_POLLED |
 4432                     CAM_SIM_BATCH)) == 0) {
 4433                         mtx_lock(&cam_simq_lock);
 4434                         first = TAILQ_EMPTY(&cam_simq);
 4435                         TAILQ_INSERT_TAIL(&cam_simq, sim, links);
 4436                         mtx_unlock(&cam_simq_lock);
 4437                         sim->flags |= CAM_SIM_ON_DONEQ;
 4438                         if (first)
 4439                                 swi_sched(cambio_ih, 0);
 4440                 }
 4441         }
 4442 }
 4443 
 4444 void
 4445 xpt_batch_start(struct cam_sim *sim)
 4446 {
 4447 
 4448         KASSERT((sim->flags & CAM_SIM_BATCH) == 0, ("Batch flag already set"));
 4449         sim->flags |= CAM_SIM_BATCH;
 4450 }
 4451 
 4452 void
 4453 xpt_batch_done(struct cam_sim *sim)
 4454 {
 4455 
 4456         KASSERT((sim->flags & CAM_SIM_BATCH) != 0, ("Batch flag was not set"));
 4457         sim->flags &= ~CAM_SIM_BATCH;
 4458         if (!TAILQ_EMPTY(&sim->sim_doneq) &&
 4459             (sim->flags & CAM_SIM_ON_DONEQ) == 0)
 4460                 camisr_runqueue(&sim->sim_doneq);
 4461 }
 4462 
 4463 union ccb *
 4464 xpt_alloc_ccb()
 4465 {
 4466         union ccb *new_ccb;
 4467 
 4468         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
 4469         return (new_ccb);
 4470 }
 4471 
 4472 union ccb *
 4473 xpt_alloc_ccb_nowait()
 4474 {
 4475         union ccb *new_ccb;
 4476 
 4477         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
 4478         return (new_ccb);
 4479 }
 4480 
 4481 void
 4482 xpt_free_ccb(union ccb *free_ccb)
 4483 {
 4484         free(free_ccb, M_CAMCCB);
 4485 }
 4486 
 4487 
 4488 
 4489 /* Private XPT functions */
 4490 
 4491 /*
 4492  * Get a CAM control block for the caller. Charge the structure to the device
 4493  * referenced by the path.  If the this device has no 'credits' then the
 4494  * device already has the maximum number of outstanding operations under way
 4495  * and we return NULL. If we don't have sufficient resources to allocate more
 4496  * ccbs, we also return NULL.
 4497  */
 4498 static union ccb *
 4499 xpt_get_ccb(struct cam_ed *device)
 4500 {
 4501         union ccb *new_ccb;
 4502         struct cam_sim *sim;
 4503 
 4504         sim = device->sim;
 4505         if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
 4506                 new_ccb = xpt_alloc_ccb_nowait();
 4507                 if (new_ccb == NULL) {
 4508                         return (NULL);
 4509                 }
 4510                 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
 4511                         callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 4512                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
 4513                                   xpt_links.sle);
 4514                 sim->ccb_count++;
 4515         }
 4516         cam_ccbq_take_opening(&device->ccbq);
 4517         SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
 4518         return (new_ccb);
 4519 }
 4520 
 4521 static void
 4522 xpt_release_bus(struct cam_eb *bus)
 4523 {
 4524 
 4525         xpt_lock_buses();
 4526         KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
 4527         if (--bus->refcount > 0) {
 4528                 xpt_unlock_buses();
 4529                 return;
 4530         }
 4531         KASSERT(TAILQ_EMPTY(&bus->et_entries),
 4532             ("refcount is zero, but target list is not empty"));
 4533         TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
 4534         xsoftc.bus_generation++;
 4535         xpt_unlock_buses();
 4536         cam_sim_release(bus->sim);
 4537         free(bus, M_CAMXPT);
 4538 }
 4539 
 4540 static struct cam_et *
 4541 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4542 {
 4543         struct cam_et *cur_target, *target;
 4544 
 4545         mtx_assert(bus->sim->mtx, MA_OWNED);
 4546         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
 4547                                          M_NOWAIT|M_ZERO);
 4548         if (target == NULL)
 4549                 return (NULL);
 4550 
 4551         TAILQ_INIT(&target->ed_entries);
 4552         target->bus = bus;
 4553         target->target_id = target_id;
 4554         target->refcount = 1;
 4555         target->generation = 0;
 4556         target->luns = NULL;
 4557         timevalclear(&target->last_reset);
 4558         /*
 4559          * Hold a reference to our parent bus so it
 4560          * will not go away before we do.
 4561          */
 4562         xpt_lock_buses();
 4563         bus->refcount++;
 4564         xpt_unlock_buses();
 4565 
 4566         /* Insertion sort into our bus's target list */
 4567         cur_target = TAILQ_FIRST(&bus->et_entries);
 4568         while (cur_target != NULL && cur_target->target_id < target_id)
 4569                 cur_target = TAILQ_NEXT(cur_target, links);
 4570         if (cur_target != NULL) {
 4571                 TAILQ_INSERT_BEFORE(cur_target, target, links);
 4572         } else {
 4573                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4574         }
 4575         bus->generation++;
 4576         return (target);
 4577 }
 4578 
 4579 static void
 4580 xpt_release_target(struct cam_et *target)
 4581 {
 4582 
 4583         mtx_assert(target->bus->sim->mtx, MA_OWNED);
 4584         if (--target->refcount > 0)
 4585                 return;
 4586         KASSERT(TAILQ_EMPTY(&target->ed_entries),
 4587             ("refcount is zero, but device list is not empty"));
 4588         TAILQ_REMOVE(&target->bus->et_entries, target, links);
 4589         target->bus->generation++;
 4590         xpt_release_bus(target->bus);
 4591         if (target->luns)
 4592                 free(target->luns, M_CAMXPT);
 4593         free(target, M_CAMXPT);
 4594 }
 4595 
 4596 static struct cam_ed *
 4597 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
 4598                          lun_id_t lun_id)
 4599 {
 4600         struct cam_ed *device;
 4601 
 4602         device = xpt_alloc_device(bus, target, lun_id);
 4603         if (device == NULL)
 4604                 return (NULL);
 4605 
 4606         device->mintags = 1;
 4607         device->maxtags = 1;
 4608         bus->sim->max_ccbs += device->ccbq.devq_openings;
 4609         return (device);
 4610 }
 4611 
 4612 struct cam_ed *
 4613 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4614 {
 4615         struct cam_ed   *cur_device, *device;
 4616         struct cam_devq *devq;
 4617         cam_status status;
 4618 
 4619         mtx_assert(target->bus->sim->mtx, MA_OWNED);
 4620         /* Make space for us in the device queue on our bus */
 4621         devq = bus->sim->devq;
 4622         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 4623         if (status != CAM_REQ_CMP)
 4624                 return (NULL);
 4625 
 4626         device = (struct cam_ed *)malloc(sizeof(*device),
 4627                                          M_CAMDEV, M_NOWAIT|M_ZERO);
 4628         if (device == NULL)
 4629                 return (NULL);
 4630 
 4631         cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 4632         device->alloc_ccb_entry.device = device;
 4633         cam_init_pinfo(&device->send_ccb_entry.pinfo);
 4634         device->send_ccb_entry.device = device;
 4635         device->target = target;
 4636         device->lun_id = lun_id;
 4637         device->sim = bus->sim;
 4638         /* Initialize our queues */
 4639         if (camq_init(&device->drvq, 0) != 0) {
 4640                 free(device, M_CAMDEV);
 4641                 return (NULL);
 4642         }
 4643         if (cam_ccbq_init(&device->ccbq,
 4644                           bus->sim->max_dev_openings) != 0) {
 4645                 camq_fini(&device->drvq);
 4646                 free(device, M_CAMDEV);
 4647                 return (NULL);
 4648         }
 4649         SLIST_INIT(&device->asyncs);
 4650         SLIST_INIT(&device->periphs);
 4651         device->generation = 0;
 4652         device->owner = NULL;
 4653         device->flags = CAM_DEV_UNCONFIGURED;
 4654         device->tag_delay_count = 0;
 4655         device->tag_saved_openings = 0;
 4656         device->refcount = 1;
 4657         callout_init_mtx(&device->callout, bus->sim->mtx, 0);
 4658 
 4659         cur_device = TAILQ_FIRST(&target->ed_entries);
 4660         while (cur_device != NULL && cur_device->lun_id < lun_id)
 4661                 cur_device = TAILQ_NEXT(cur_device, links);
 4662         if (cur_device != NULL)
 4663                 TAILQ_INSERT_BEFORE(cur_device, device, links);
 4664         else
 4665                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 4666         target->refcount++;
 4667         target->generation++;
 4668         return (device);
 4669 }
 4670 
 4671 void
 4672 xpt_acquire_device(struct cam_ed *device)
 4673 {
 4674 
 4675         mtx_assert(device->sim->mtx, MA_OWNED);
 4676         device->refcount++;
 4677 }
 4678 
 4679 void
 4680 xpt_release_device(struct cam_ed *device)
 4681 {
 4682         struct cam_devq *devq;
 4683 
 4684         mtx_assert(device->sim->mtx, MA_OWNED);
 4685         if (--device->refcount > 0)
 4686                 return;
 4687 
 4688         KASSERT(SLIST_EMPTY(&device->periphs),
 4689             ("refcount is zero, but periphs list is not empty"));
 4690         if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 4691          || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 4692                 panic("Removing device while still queued for ccbs");
 4693 
 4694         if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 4695                 callout_stop(&device->callout);
 4696 
 4697         TAILQ_REMOVE(&device->target->ed_entries, device,links);
 4698         device->target->generation++;
 4699         device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings;
 4700         /* Release our slot in the devq */
 4701         devq = device->target->bus->sim->devq;
 4702         cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 4703         camq_fini(&device->drvq);
 4704         cam_ccbq_fini(&device->ccbq);
 4705         /*
 4706          * Free allocated memory.  free(9) does nothing if the
 4707          * supplied pointer is NULL, so it is safe to call without
 4708          * checking.
 4709          */
 4710         free(device->supported_vpds, M_CAMXPT);
 4711         free(device->device_id, M_CAMXPT);
 4712         free(device->physpath, M_CAMXPT);
 4713         free(device->rcap_buf, M_CAMXPT);
 4714         free(device->serial_num, M_CAMXPT);
 4715 
 4716         xpt_release_target(device->target);
 4717         free(device, M_CAMDEV);
 4718 }
 4719 
 4720 u_int32_t
 4721 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 4722 {
 4723         int     diff;
 4724         int     result;
 4725         struct  cam_ed *dev;
 4726 
 4727         dev = path->device;
 4728 
 4729         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 4730         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 4731         if (result == CAM_REQ_CMP && (diff < 0)) {
 4732                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 4733         }
 4734         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4735          || (dev->inq_flags & SID_CmdQue) != 0)
 4736                 dev->tag_saved_openings = newopenings;
 4737         /* Adjust the global limit */
 4738         dev->sim->max_ccbs += diff;
 4739         return (result);
 4740 }
 4741 
 4742 static struct cam_eb *
 4743 xpt_find_bus(path_id_t path_id)
 4744 {
 4745         struct cam_eb *bus;
 4746 
 4747         xpt_lock_buses();
 4748         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4749              bus != NULL;
 4750              bus = TAILQ_NEXT(bus, links)) {
 4751                 if (bus->path_id == path_id) {
 4752                         bus->refcount++;
 4753                         break;
 4754                 }
 4755         }
 4756         xpt_unlock_buses();
 4757         return (bus);
 4758 }
 4759 
 4760 static struct cam_et *
 4761 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 4762 {
 4763         struct cam_et *target;
 4764 
 4765         mtx_assert(bus->sim->mtx, MA_OWNED);
 4766         for (target = TAILQ_FIRST(&bus->et_entries);
 4767              target != NULL;
 4768              target = TAILQ_NEXT(target, links)) {
 4769                 if (target->target_id == target_id) {
 4770                         target->refcount++;
 4771                         break;
 4772                 }
 4773         }
 4774         return (target);
 4775 }
 4776 
 4777 static struct cam_ed *
 4778 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 4779 {
 4780         struct cam_ed *device;
 4781 
 4782         mtx_assert(target->bus->sim->mtx, MA_OWNED);
 4783         for (device = TAILQ_FIRST(&target->ed_entries);
 4784              device != NULL;
 4785              device = TAILQ_NEXT(device, links)) {
 4786                 if (device->lun_id == lun_id) {
 4787                         device->refcount++;
 4788                         break;
 4789                 }
 4790         }
 4791         return (device);
 4792 }
 4793 
 4794 void
 4795 xpt_start_tags(struct cam_path *path)
 4796 {
 4797         struct ccb_relsim crs;
 4798         struct cam_ed *device;
 4799         struct cam_sim *sim;
 4800         int    newopenings;
 4801 
 4802         device = path->device;
 4803         sim = path->bus->sim;
 4804         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4805         xpt_freeze_devq(path, /*count*/1);
 4806         device->inq_flags |= SID_CmdQue;
 4807         if (device->tag_saved_openings != 0)
 4808                 newopenings = device->tag_saved_openings;
 4809         else
 4810                 newopenings = min(device->maxtags,
 4811                                   sim->max_tagged_dev_openings);
 4812         xpt_dev_ccbq_resize(path, newopenings);
 4813         xpt_async(AC_GETDEV_CHANGED, path, NULL);
 4814         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4815         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4816         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4817         crs.openings
 4818             = crs.release_timeout
 4819             = crs.qfrozen_cnt
 4820             = 0;
 4821         xpt_action((union ccb *)&crs);
 4822 }
 4823 
 4824 void
 4825 xpt_stop_tags(struct cam_path *path)
 4826 {
 4827         struct ccb_relsim crs;
 4828         struct cam_ed *device;
 4829         struct cam_sim *sim;
 4830 
 4831         device = path->device;
 4832         sim = path->bus->sim;
 4833         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4834         device->tag_delay_count = 0;
 4835         xpt_freeze_devq(path, /*count*/1);
 4836         device->inq_flags &= ~SID_CmdQue;
 4837         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
 4838         xpt_async(AC_GETDEV_CHANGED, path, NULL);
 4839         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4840         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4841         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4842         crs.openings
 4843             = crs.release_timeout
 4844             = crs.qfrozen_cnt
 4845             = 0;
 4846         xpt_action((union ccb *)&crs);
 4847 }
 4848 
 4849 static void
 4850 xpt_boot_delay(void *arg)
 4851 {
 4852 
 4853         xpt_release_boot();
 4854 }
 4855 
 4856 static void
 4857 xpt_config(void *arg)
 4858 {
 4859         /*
 4860          * Now that interrupts are enabled, go find our devices
 4861          */
 4862 
 4863         /* Setup debugging path */
 4864         if (cam_dflags != CAM_DEBUG_NONE) {
 4865                 if (xpt_create_path_unlocked(&cam_dpath, NULL,
 4866                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 4867                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 4868                         printf("xpt_config: xpt_create_path() failed for debug"
 4869                                " target %d:%d:%d, debugging disabled\n",
 4870                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 4871                         cam_dflags = CAM_DEBUG_NONE;
 4872                 }
 4873         } else
 4874                 cam_dpath = NULL;
 4875 
 4876         periphdriver_init(1);
 4877         xpt_hold_boot();
 4878         callout_init(&xsoftc.boot_callout, 1);
 4879         callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
 4880             xpt_boot_delay, NULL);
 4881         /* Fire up rescan thread. */
 4882         if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
 4883                 printf("xpt_config: failed to create rescan thread.\n");
 4884         }
 4885 }
 4886 
 4887 void
 4888 xpt_hold_boot(void)
 4889 {
 4890         xpt_lock_buses();
 4891         xsoftc.buses_to_config++;
 4892         xpt_unlock_buses();
 4893 }
 4894 
 4895 void
 4896 xpt_release_boot(void)
 4897 {
 4898         xpt_lock_buses();
 4899         xsoftc.buses_to_config--;
 4900         if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
 4901                 struct  xpt_task *task;
 4902 
 4903                 xsoftc.buses_config_done = 1;
 4904                 xpt_unlock_buses();
 4905                 /* Call manually because we don't have any busses */
 4906                 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
 4907                 if (task != NULL) {
 4908                         TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
 4909                         taskqueue_enqueue(taskqueue_thread, &task->task);
 4910                 }
 4911         } else
 4912                 xpt_unlock_buses();
 4913 }
 4914 
 4915 /*
 4916  * If the given device only has one peripheral attached to it, and if that
 4917  * peripheral is the passthrough driver, announce it.  This insures that the
 4918  * user sees some sort of announcement for every peripheral in their system.
 4919  */
 4920 static int
 4921 xptpassannouncefunc(struct cam_ed *device, void *arg)
 4922 {
 4923         struct cam_periph *periph;
 4924         int i;
 4925 
 4926         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 4927              periph = SLIST_NEXT(periph, periph_links), i++);
 4928 
 4929         periph = SLIST_FIRST(&device->periphs);
 4930         if ((i == 1)
 4931          && (strncmp(periph->periph_name, "pass", 4) == 0))
 4932                 xpt_announce_periph(periph, NULL);
 4933 
 4934         return(1);
 4935 }
 4936 
 4937 static void
 4938 xpt_finishconfig_task(void *context, int pending)
 4939 {
 4940 
 4941         periphdriver_init(2);
 4942         /*
 4943          * Check for devices with no "standard" peripheral driver
 4944          * attached.  For any devices like that, announce the
 4945          * passthrough driver so the user will see something.
 4946          */
 4947         if (!bootverbose)
 4948                 xpt_for_all_devices(xptpassannouncefunc, NULL);
 4949 
 4950         /* Release our hook so that the boot can continue. */
 4951         config_intrhook_disestablish(xsoftc.xpt_config_hook);
 4952         free(xsoftc.xpt_config_hook, M_CAMXPT);
 4953         xsoftc.xpt_config_hook = NULL;
 4954 
 4955         free(context, M_CAMXPT);
 4956 }
 4957 
 4958 cam_status
 4959 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
 4960                    struct cam_path *path)
 4961 {
 4962         struct ccb_setasync csa;
 4963         cam_status status;
 4964         int xptpath = 0;
 4965 
 4966         if (path == NULL) {
 4967                 mtx_lock(&xsoftc.xpt_lock);
 4968                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
 4969                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4970                 if (status != CAM_REQ_CMP) {
 4971                         mtx_unlock(&xsoftc.xpt_lock);
 4972                         return (status);
 4973                 }
 4974                 xptpath = 1;
 4975         }
 4976 
 4977         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 4978         csa.ccb_h.func_code = XPT_SASYNC_CB;
 4979         csa.event_enable = event;
 4980         csa.callback = cbfunc;
 4981         csa.callback_arg = cbarg;
 4982         xpt_action((union ccb *)&csa);
 4983         status = csa.ccb_h.status;
 4984 
 4985         if (xptpath) {
 4986                 xpt_free_path(path);
 4987                 mtx_unlock(&xsoftc.xpt_lock);
 4988         }
 4989 
 4990         if ((status == CAM_REQ_CMP) &&
 4991             (csa.event_enable & AC_FOUND_DEVICE)) {
 4992                 /*
 4993                  * Get this peripheral up to date with all
 4994                  * the currently existing devices.
 4995                  */
 4996                 xpt_for_all_devices(xptsetasyncfunc, &csa);
 4997         }
 4998         if ((status == CAM_REQ_CMP) &&
 4999             (csa.event_enable & AC_PATH_REGISTERED)) {
 5000                 /*
 5001                  * Get this peripheral up to date with all
 5002                  * the currently existing busses.
 5003                  */
 5004                 xpt_for_all_busses(xptsetasyncbusfunc, &csa);
 5005         }
 5006 
 5007         return (status);
 5008 }
 5009 
 5010 static void
 5011 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 5012 {
 5013         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 5014 
 5015         switch (work_ccb->ccb_h.func_code) {
 5016         /* Common cases first */
 5017         case XPT_PATH_INQ:              /* Path routing inquiry */
 5018         {
 5019                 struct ccb_pathinq *cpi;
 5020 
 5021                 cpi = &work_ccb->cpi;
 5022                 cpi->version_num = 1; /* XXX??? */
 5023                 cpi->hba_inquiry = 0;
 5024                 cpi->target_sprt = 0;
 5025                 cpi->hba_misc = 0;
 5026                 cpi->hba_eng_cnt = 0;
 5027                 cpi->max_target = 0;
 5028                 cpi->max_lun = 0;
 5029                 cpi->initiator_id = 0;
 5030                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 5031                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
 5032                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 5033                 cpi->unit_number = sim->unit_number;
 5034                 cpi->bus_id = sim->bus_id;
 5035                 cpi->base_transfer_speed = 0;
 5036                 cpi->protocol = PROTO_UNSPECIFIED;
 5037                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
 5038                 cpi->transport = XPORT_UNSPECIFIED;
 5039                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
 5040                 cpi->ccb_h.status = CAM_REQ_CMP;
 5041                 xpt_done(work_ccb);
 5042                 break;
 5043         }
 5044         default:
 5045                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 5046                 xpt_done(work_ccb);
 5047                 break;
 5048         }
 5049 }
 5050 
 5051 /*
 5052  * The xpt as a "controller" has no interrupt sources, so polling
 5053  * is a no-op.
 5054  */
 5055 static void
 5056 xptpoll(struct cam_sim *sim)
 5057 {
 5058 }
 5059 
 5060 void
 5061 xpt_lock_buses(void)
 5062 {
 5063         mtx_lock(&xsoftc.xpt_topo_lock);
 5064 }
 5065 
 5066 void
 5067 xpt_unlock_buses(void)
 5068 {
 5069         mtx_unlock(&xsoftc.xpt_topo_lock);
 5070 }
 5071 
 5072 static void
 5073 camisr(void *dummy)
 5074 {
 5075         cam_simq_t queue;
 5076         struct cam_sim *sim;
 5077 
 5078         mtx_lock(&cam_simq_lock);
 5079         TAILQ_INIT(&queue);
 5080         while (!TAILQ_EMPTY(&cam_simq)) {
 5081                 TAILQ_CONCAT(&queue, &cam_simq, links);
 5082                 mtx_unlock(&cam_simq_lock);
 5083 
 5084                 while ((sim = TAILQ_FIRST(&queue)) != NULL) {
 5085                         TAILQ_REMOVE(&queue, sim, links);
 5086                         CAM_SIM_LOCK(sim);
 5087                         camisr_runqueue(&sim->sim_doneq);
 5088                         sim->flags &= ~CAM_SIM_ON_DONEQ;
 5089                         CAM_SIM_UNLOCK(sim);
 5090                 }
 5091                 mtx_lock(&cam_simq_lock);
 5092         }
 5093         mtx_unlock(&cam_simq_lock);
 5094 }
 5095 
 5096 static void
 5097 camisr_runqueue(void *V_queue)
 5098 {
 5099         cam_isrq_t *queue = V_queue;
 5100         struct  ccb_hdr *ccb_h;
 5101 
 5102         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
 5103                 int     runq;
 5104 
 5105                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
 5106                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 5107 
 5108                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
 5109                           ("camisr\n"));
 5110 
 5111                 runq = FALSE;
 5112 
 5113                 if (ccb_h->flags & CAM_HIGH_POWER) {
 5114                         struct highpowerlist    *hphead;
 5115                         union ccb               *send_ccb;
 5116 
 5117                         mtx_lock(&xsoftc.xpt_lock);
 5118                         hphead = &xsoftc.highpowerq;
 5119 
 5120                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
 5121 
 5122                         /*
 5123                          * Increment the count since this command is done.
 5124                          */
 5125                         xsoftc.num_highpower++;
 5126 
 5127                         /*
 5128                          * Any high powered commands queued up?
 5129                          */
 5130                         if (send_ccb != NULL) {
 5131 
 5132                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
 5133                                 mtx_unlock(&xsoftc.xpt_lock);
 5134 
 5135                                 xpt_release_devq(send_ccb->ccb_h.path,
 5136                                                  /*count*/1, /*runqueue*/TRUE);
 5137                         } else
 5138                                 mtx_unlock(&xsoftc.xpt_lock);
 5139                 }
 5140 
 5141                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 5142                         struct cam_ed *dev;
 5143 
 5144                         dev = ccb_h->path->device;
 5145 
 5146                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 5147                         ccb_h->path->bus->sim->devq->send_active--;
 5148                         ccb_h->path->bus->sim->devq->send_openings++;
 5149                         runq = TRUE;
 5150 
 5151                         if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 5152                           && (dev->ccbq.dev_active == 0))) {
 5153                                 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
 5154                                 xpt_release_devq(ccb_h->path, /*count*/1,
 5155                                                  /*run_queue*/FALSE);
 5156                         }
 5157 
 5158                         if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 5159                           && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
 5160                                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 5161                                 xpt_release_devq(ccb_h->path, /*count*/1,
 5162                                                  /*run_queue*/FALSE);
 5163                         }
 5164 
 5165                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 5166                          && (--dev->tag_delay_count == 0))
 5167                                 xpt_start_tags(ccb_h->path);
 5168                         if (!device_is_send_queued(dev)) {
 5169                                 (void)xpt_schedule_dev_sendq(ccb_h->path->bus, 
 5170                                                              dev);
 5171                         }
 5172                 }
 5173 
 5174                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
 5175                         xpt_release_simq(ccb_h->path->bus->sim,
 5176                                          /*run_queue*/TRUE);
 5177                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
 5178                         runq = FALSE;
 5179                 }
 5180 
 5181                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 5182                  && (ccb_h->status & CAM_DEV_QFRZN)) {
 5183                         xpt_release_devq(ccb_h->path, /*count*/1,
 5184                                          /*run_queue*/TRUE);
 5185                         ccb_h->status &= ~CAM_DEV_QFRZN;
 5186                 } else if (runq) {
 5187                         xpt_run_dev_sendq(ccb_h->path->bus);
 5188                 }
 5189 
 5190                 /* Call the peripheral driver's callback */
 5191                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 5192         }
 5193 }

Cache object: eecadf6ea89f063eb62ac8c134092013


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.