FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c

/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.2/sys/cam/cam_xpt.c 215173 2010-11-12 11:22:59Z brucec $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/reboot.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include <machine/stdarg.h>     /* for xpt_print below */
#include "opt_cam.h"

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
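
/*
 * A sketch of raising that ceiling at build time (assuming the usual
 * opt_cam.h option plumbing; the value 8 is only an example):
 *
 *      options         CAM_MAX_HIGHPOWER=8    # in the kernel config file
 */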

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
        struct task     task;
        void            *data1;
        uintptr_t       data2;
};
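
/*
 * A minimal usage sketch (the handler name and arguments are
 * hypothetical; the actual enqueue sites appear later in this file):
 *
 *      struct xpt_task *task;
 *
 *      task = malloc(sizeof(*task), M_CAMXPT, M_NOWAIT);
 *      if (task != NULL) {
 *              TASK_INIT(&task->task, 0, example_task_fn, task);
 *              task->data1 = some_pointer;
 *              task->data2 = some_integer;
 *              taskqueue_enqueue(taskqueue_thread, &task->task);
 *      }
 */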

typedef enum {
        XPT_FLAG_OPEN           = 0x01
} xpt_flags;

struct xpt_softc {
        xpt_flags               flags;
        u_int32_t               xpt_generation;

        /* number of high powered commands that can go through right now */
        STAILQ_HEAD(highpowerlist, ccb_hdr)     highpowerq;
        int                     num_highpower;

        /* queue for handling async rescan requests. */
        TAILQ_HEAD(, ccb_hdr) ccb_scanq;
        int buses_to_config;
        int buses_config_done;

        /* Registered busses */
        TAILQ_HEAD(,cam_eb)     xpt_busses;
        u_int                   bus_generation;

        struct intr_config_hook *xpt_config_hook;

        int                     boot_delay;
        struct callout          boot_callout;

        struct mtx              xpt_topo_lock;
        struct mtx              xpt_lock;
};

typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;
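
/*
 * A dev_match_ret combines one action code (upper nibble) with flag bits
 * (lower nibble).  A sketch of how the traversal code below decodes it:
 *
 *      dev_match_ret ret;
 *
 *      ret = xptbusmatch(patterns, num_patterns, bus);
 *      if ((ret & DM_RET_ACTION_MASK) == DM_RET_ERROR)
 *              handle the error;
 *      if ((ret & DM_RET_COPY) != 0)
 *              copy this node out to the user buffer;
 */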

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth      depth;
        void                    *tr_func;
        void                    *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
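
/*
 * Traversal callbacks return nonzero to continue the walk and zero to
 * stop it early.  A hedged sketch of a device callback (the name and
 * its pairing with xpt_for_all_devices are illustrative only):
 *
 *      static int
 *      example_count_func(struct cam_ed *device, void *arg)
 *      {
 *              int *count = (int *)arg;
 *
 *              (*count)++;
 *              return (1);     nonzero: keep traversing
 *      }
 *
 *      ...
 *      xpt_for_all_devices(example_count_func, &count);
 */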

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
           &xsoftc.boot_delay, 0, "Bus registration wait time");
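
/*
 * The delay can be set from the loader, e.g. in /boot/loader.conf
 * (the value shown is an example; units are assumed to be milliseconds,
 * as consumed by xpt_config):
 *
 *      kern.cam.boot_delay="10000"
 */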

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
static cam_simq_t cam_simq;
static struct mtx cam_simq_lock;

/* Pointers to software interrupt handlers */
static void *cambio_ih;

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
        CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_open =       xptopen,
        .d_close =      xptclose,
        .d_ioctl =      xptioctl,
        .d_name =       "xpt",
};

/* Storage for debugging data structures */
#ifdef  CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
        "cam",
        cam_module_event_handler,
        NULL
};

static int      xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static void             xpt_async_bcast(struct async_list *async_head,
                                        u_int32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static void      xpt_run_dev_allocq(struct cam_eb *bus);
static void      xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void      xpt_release_simq_timeout(void *arg) __unused;
static void      xpt_release_bus(struct cam_eb *bus);
static void      xpt_release_devq_device(struct cam_ed *dev, cam_rl rl,
                    u_int count, int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_release_target(struct cam_et *target);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static void      camisr(void *);
static void      camisr_runqueue(void *);
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static void             xpt_finishconfig_task(void *context, int pending);
static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
                                            void *arg);
static void             xpt_dev_async_default(u_int32_t async_code,
                                              struct cam_eb *bus,
                                              struct cam_et *target,
                                              struct cam_ed *device,
                                              void *async_arg);
static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
                                                 struct cam_et *target,
                                                 lun_id_t lun_id);
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if ((dev->drvq.entries > 0) &&
            (dev->ccbq.devq_openings > 0) &&
            (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
                CAMQ_GET_PRIO(&dev->drvq))) == 0)) {
                /*
                 * The priority of a device waiting for CCB resources
                 * is that of the highest priority peripheral driver
                 * enqueued.
                 */
                retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
                                          &dev->alloc_ccb_entry.pinfo,
                                          CAMQ_GET_PRIO(&dev->drvq));
        } else {
                retval = 0;
        }

        return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
        int     retval;

        if ((dev->ccbq.queue.entries > 0) &&
            (dev->ccbq.dev_openings > 0) &&
            (cam_ccbq_frozen_top(&dev->ccbq) == 0)) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&bus->sim->devq->send_queue,
                                     &dev->send_ccb_entry.pinfo,
                                     CAMQ_GET_PRIO(&dev->ccbq.queue));
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
        return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
        return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
        return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init(void)
{
        make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
        /* Caller will release the CCB */
        wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

        /*
         * Only allow read-write access.
         */
        if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((flags & O_NONBLOCK) != 0) {
                printf("%s: can't do nonblocking access\n", devtoname(dev));
                return(ENODEV);
        }

        /* Mark ourselves open */
        mtx_lock(&xsoftc.xpt_lock);
        xsoftc.flags |= XPT_FLAG_OPEN;
        mtx_unlock(&xsoftc.xpt_lock);

        return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

        /* Mark ourselves closed */
        mtx_lock(&xsoftc.xpt_lock);
        xsoftc.flags &= ~XPT_FLAG_OPEN;
        mtx_unlock(&xsoftc.xpt_lock);

        return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
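/*
 * In other words, the pattern used throughout xptioctl is (a sketch):
 *
 *      bus = xpt_find_bus(path_id);
 *      CAM_SIM_LOCK(bus->sim);
 *      ... build and dispatch CCBs ...
 *      CAM_SIM_UNLOCK(bus->sim);
 *      xpt_release_bus(bus);
 */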
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int error;

        error = 0;

        switch(cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
         * in the CAM spec.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;
                struct cam_eb *bus;

                inccb = (union ccb *)addr;

                bus = xpt_find_bus(inccb->ccb_h.path_id);
                if (bus == NULL) {
                        error = EINVAL;
                        break;
                }

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
                         || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
                                error = EINVAL;
                                break;
                        }
                        /* FALLTHROUGH */
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:

                        ccb = xpt_alloc_ccb();

                        CAM_SIM_LOCK(bus->sim);

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP) {
                                error = EINVAL;
                                CAM_SIM_UNLOCK(bus->sim);
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        ccb->ccb_h.cbfcnp = xptdone;
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        CAM_SIM_UNLOCK(bus->sim);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        CAM_SIM_LOCK(bus->sim);

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP) {
                                error = EINVAL;
                                CAM_SIM_UNLOCK(bus->sim);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        ccb.ccb_h.cbfcnp = xptdone;
                        xpt_action(&ccb);
                        CAM_SIM_UNLOCK(bus->sim);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;
                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                xpt_release_bus(bus);
                break;
        }
        /*
         * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST CCB as
         * input, with the peripheral driver name and unit number filled in.
         * The other fields don't really matter as input.  The passthrough
         * driver name ("pass") and unit number are passed back in the CCB.
         * The current device generation number, the index into the device
         * peripheral driver list, and the status are also passed back.  Note
         * that since we do everything in one pass, unlike the XPT_GDEVLIST
         * CCB, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
         * (or rather should be) impossible for the device peripheral driver
         * list to change, since we look at the whole thing in one pass and we
         * do it with lock protection.
         */
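        /*
         * A hedged sketch of the expected userland caller (the device node,
         * names, and error handling are illustrative only):
         *
         *      union ccb ccb;
         *      int fd = open("/dev/xpt0", O_RDWR);
         *
         *      bzero(&ccb, sizeof(ccb));
         *      strlcpy(ccb.cgdl.periph_name, "da",
         *          sizeof(ccb.cgdl.periph_name));
         *      ccb.cgdl.unit_number = 0;
         *      if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
         *          ccb.ccb_h.status == CAM_REQ_CMP)
         *              printf("%s%d\n", ccb.cgdl.periph_name,
         *                  ccb.cgdl.unit_number);
         */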
        case CAMGETPASSTHRU: {
                union ccb *ccb;
                struct cam_periph *periph;
                struct periph_driver **p_drv;
                char   *name;
                u_int unit;
                u_int cur_generation;
                int base_periph_found;
                int splbreaknum;

                ccb = (union ccb *)addr;
                unit = ccb->cgdl.unit_number;
                name = ccb->cgdl.periph_name;
                /*
                 * Every 100 devices, we want to drop our lock protection to
                 * give the software interrupt handler a chance to run.
                 * Most systems won't run into this check, but this should
                 * avoid starvation in the software interrupt handler in
                 * large systems.
                 */
                splbreaknum = 100;

                base_periph_found = 0;

                /*
                 * Sanity check -- make sure we don't get a null peripheral
                 * driver name.
                 */
                if (*ccb->cgdl.periph_name == '\0') {
                        error = EINVAL;
                        break;
                }

                /* Keep the list from changing while we traverse it */
                mtx_lock(&xsoftc.xpt_topo_lock);
ptstartover:
                cur_generation = xsoftc.xpt_generation;

                /* first find our driver in the list of drivers */
                for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
                        if (strcmp((*p_drv)->driver_name, name) == 0)
                                break;

                if (*p_drv == NULL) {
                        mtx_unlock(&xsoftc.xpt_topo_lock);
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        break;
                }

                /*
                 * Run through every peripheral instance of this driver
                 * and check to see whether it matches the unit passed
                 * in by the user.  If it does, get out of the loops and
                 * find the passthrough driver associated with that
                 * peripheral driver.
                 */
                for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
                     periph = TAILQ_NEXT(periph, unit_links)) {

                        if (periph->unit_number == unit) {
                                break;
                        } else if (--splbreaknum == 0) {
                                mtx_unlock(&xsoftc.xpt_topo_lock);
                                mtx_lock(&xsoftc.xpt_topo_lock);
                                splbreaknum = 100;
                                if (cur_generation != xsoftc.xpt_generation)
                                        goto ptstartover;
                        }
                }
                /*
                 * If we found the peripheral driver that the user passed
                 * in, go through all of the peripheral drivers for that
                 * particular device and look for a passthrough driver.
                 */
                if (periph != NULL) {
                        struct cam_ed *device;
                        int i;

                        base_periph_found = 1;
                        device = periph->path->device;
                        for (i = 0, periph = SLIST_FIRST(&device->periphs);
                             periph != NULL;
                             periph = SLIST_NEXT(periph, periph_links), i++) {
                                /*
                                 * Check to see whether we have a
                                 * passthrough device or not.
                                 */
                                if (strcmp(periph->periph_name, "pass") == 0) {
                                        /*
                                         * Fill in the getdevlist fields.
                                         */
                                        strcpy(ccb->cgdl.periph_name,
                                               periph->periph_name);
                                        ccb->cgdl.unit_number =
                                                periph->unit_number;
                                        if (SLIST_NEXT(periph, periph_links))
                                                ccb->cgdl.status =
                                                        CAM_GDEVLIST_MORE_DEVS;
                                        else
                                                ccb->cgdl.status =
                                                       CAM_GDEVLIST_LAST_DEVICE;
                                        ccb->cgdl.generation =
                                                device->generation;
                                        ccb->cgdl.index = i;
                                        /*
                                         * Fill in some CCB header fields
                                         * that the user may want.
                                         */
                                        ccb->ccb_h.path_id =
                                                periph->path->bus->path_id;
                                        ccb->ccb_h.target_id =
                                                periph->path->target->target_id;
                                        ccb->ccb_h.target_lun =
                                                periph->path->device->lun_id;
                                        ccb->ccb_h.status = CAM_REQ_CMP;
                                        break;
                                }
                        }
                }

                /*
                 * If the periph is null here, one of two things has
                 * happened.  The first possibility is that we couldn't
                 * find the unit number of the particular peripheral driver
                 * that the user is asking about.  e.g. the user asks for
                 * the passthrough driver for "da11".  We find the list of
                 * "da" peripherals all right, but there is no unit 11.
                 * The other possibility is that we went through the list
                 * of peripheral drivers attached to the device structure,
                 * but didn't find one with the name "pass".  Either way,
                 * we return ENOENT, since we couldn't find something.
                 */
                if (periph == NULL) {
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        /*
                         * It is unfortunate that this is even necessary,
                         * but there are many, many clueless users out there.
                         * If this is true, the user is looking for the
                         * passthrough driver, but doesn't have one in his
                         * kernel.
                         */
                        if (base_periph_found == 1) {
                                printf("xptioctl: pass driver is not in the "
                                       "kernel\n");
                                printf("xptioctl: put \"device pass\" in "
                                       "your kernel config file\n");
                        }
                }
                mtx_unlock(&xsoftc.xpt_topo_lock);
                break;
        }
        default:
                error = ENOTTY;
                break;
        }

        return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
        int error;

        switch (what) {
        case MOD_LOAD:
                if ((error = xpt_init(NULL)) != 0)
                        return (error);
                break;
        case MOD_UNLOAD:
                return EBUSY;
        default:
                return EOPNOTSUPP;
        }

        return 0;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

        if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
                xpt_free_path(done_ccb->ccb_h.path);
                xpt_free_ccb(done_ccb);
        } else {
                done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
                (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
        }
        xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
        union ccb       *ccb;
        struct cam_sim  *sim;

        xpt_lock_buses();
        for (;;) {
                if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
                        msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
                               "ccb_scanq", 0);
                if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
                        TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
                        xpt_unlock_buses();

                        sim = ccb->ccb_h.path->bus->sim;
                        CAM_SIM_LOCK(sim);
                        xpt_action(ccb);
                        CAM_SIM_UNLOCK(sim);

                        xpt_lock_buses();
                }
        }
}

void
xpt_rescan(union ccb *ccb)
{
        struct ccb_hdr *hdr;

        /* Prepare request */
        if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD ||
            ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_BUS;
        else
                ccb->ccb_h.func_code = XPT_SCAN_LUN;
        ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
        ccb->ccb_h.cbfcnp = xpt_rescan_done;
        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
        /* Don't make duplicate entries for the same paths. */
        xpt_lock_buses();
        if (ccb->ccb_h.ppriv_ptr1 == NULL) {
                TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
                        if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
                                wakeup(&xsoftc.ccb_scanq);
                                xpt_unlock_buses();
                                xpt_print(ccb->ccb_h.path, "rescan already queued\n");
                                xpt_free_path(ccb->ccb_h.path);
                                xpt_free_ccb(ccb);
                                return;
                        }
                }
        }
        TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
        xsoftc.buses_to_config++;
        wakeup(&xsoftc.ccb_scanq);
        xpt_unlock_buses();
}
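
/*
 * A hedged sketch of a typical xpt_rescan() caller (the path_id value is
 * illustrative; xpt_alloc_ccb returns a zeroed CCB, so cbfcnp is NULL and
 * xpt_rescan_done will free the path and CCB when the scan completes):
 *
 *      union ccb *ccb = xpt_alloc_ccb();
 *
 *      if (xpt_create_path(&ccb->ccb_h.path, NULL, path_id,
 *          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) == CAM_REQ_CMP)
 *              xpt_rescan(ccb);
 *      else
 *              xpt_free_ccb(ccb);
 */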

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
        struct cam_sim *xpt_sim;
        struct cam_path *path;
        struct cam_devq *devq;
        cam_status status;

        TAILQ_INIT(&xsoftc.xpt_busses);
        TAILQ_INIT(&cam_simq);
        TAILQ_INIT(&xsoftc.ccb_scanq);
        STAILQ_INIT(&xsoftc.highpowerq);
        xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

        mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
        mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
        mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);

        /*
         * The xpt layer is, itself, the equivalent of a SIM.
         * Allow 16 ccbs in the ccb pool for it.  This should
         * give decent parallelism when we probe busses and
         * perform other XPT functions.
         */
        devq = cam_simq_alloc(16);
        xpt_sim = cam_sim_alloc(xptaction,
                                xptpoll,
                                "xpt",
                                /*softc*/NULL,
                                /*unit*/0,
                                /*mtx*/&xsoftc.xpt_lock,
                                /*max_dev_transactions*/0,
                                /*max_tagged_dev_transactions*/0,
                                devq);
        if (xpt_sim == NULL)
                return (ENOMEM);

        mtx_lock(&xsoftc.xpt_lock);
        if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
                mtx_unlock(&xsoftc.xpt_lock);
                printf("xpt_init: xpt_bus_register failed with status %#x,"
                       " failing attach\n", status);
                return (EINVAL);
        }

        /*
         * Looking at the XPT from the SIM layer, the XPT is
         * the equivalent of a peripheral driver.  Allocate
         * a peripheral driver entry for us.
         */
        if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
                                      CAM_TARGET_WILDCARD,
                                      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
                mtx_unlock(&xsoftc.xpt_lock);
                printf("xpt_init: xpt_create_path failed with status %#x,"
                       " failing attach\n", status);
                return (EINVAL);
        }

        cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
                         path, NULL, 0, xpt_sim);
        xpt_free_path(path);
        mtx_unlock(&xsoftc.xpt_lock);
        /* Install our software interrupt handlers */
        swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
        /*
         * Register a callback for when interrupts are enabled.
         */
        xsoftc.xpt_config_hook =
            (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
                                              M_CAMXPT, M_NOWAIT | M_ZERO);
        if (xsoftc.xpt_config_hook == NULL) {
                printf("xpt_init: Cannot malloc config hook "
                       "- failing attach\n");
                return (ENOMEM);
        }
        xsoftc.xpt_config_hook->ich_func = xpt_config;
        if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
                free(xsoftc.xpt_config_hook, M_CAMXPT);
                printf("xpt_init: config_intrhook_establish failed "
                       "- failing attach\n");
        }

        return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
        struct cam_sim *xpt_sim;

        if (periph == NULL) {
                printf("xptregister: periph was NULL!!\n");
                return(CAM_REQ_CMP_ERR);
        }

        xpt_sim = (struct cam_sim *)arg;
        xpt_sim->softc = periph;
        xpt_periph = periph;
        periph->softc = NULL;

        return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
        struct cam_ed *device;
        int32_t  status;

        mtx_assert(periph->sim->mtx, MA_OWNED);

        device = periph->path->device;

        status = CAM_REQ_CMP;

        if (device != NULL) {
                struct periph_list *periph_head;

                periph_head = &device->periphs;

                /*
                 * Make room for this peripheral
                 * so it will fit in the queue
                 * when it's scheduled to run
                 */
                status = camq_resize(&device->drvq,
                                     device->drvq.array_size + 1);

                device->generation++;

                SLIST_INSERT_HEAD(periph_head, periph, periph_links);
        }

        mtx_lock(&xsoftc.xpt_topo_lock);
        xsoftc.xpt_generation++;
        mtx_unlock(&xsoftc.xpt_topo_lock);

        return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
        struct cam_ed *device;

        mtx_assert(periph->sim->mtx, MA_OWNED);

        device = periph->path->device;

        if (device != NULL) {
                struct periph_list *periph_head;

                periph_head = &device->periphs;

                /* Release the slot for this peripheral */
                camq_resize(&device->drvq, device->drvq.array_size - 1);

                device->generation++;

                SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
        }

        mtx_lock(&xsoftc.xpt_topo_lock);
        xsoftc.xpt_generation++;
        mtx_unlock(&xsoftc.xpt_topo_lock);
}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
        struct  cam_path *path = periph->path;

        mtx_assert(periph->sim->mtx, MA_OWNED);

        printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n",
               periph->periph_name, periph->unit_number,
               path->bus->sim->sim_name,
               path->bus->sim->unit_number,
               path->bus->sim->bus_id,
               path->bus->path_id,
               path->target->target_id,
               path->device->lun_id);
        printf("%s%d: ", periph->periph_name, periph->unit_number);
        if (path->device->protocol == PROTO_SCSI)
                scsi_print_inquiry(&path->device->inq_data);
        else if (path->device->protocol == PROTO_ATA ||
            path->device->protocol == PROTO_SATAPM)
                ata_print_ident(&path->device->ident_data);
        else
                printf("Unknown protocol device\n");
        if (bootverbose && path->device->serial_num_len > 0) {
                /* Don't wrap the screen - print only the first 60 chars */
                printf("%s%d: Serial Number %.60s\n", periph->periph_name,
                       periph->unit_number, path->device->serial_num);
        }
        /* Announce transport details. */
        (*(path->bus->xport->announce))(periph);
        /* Announce command queueing. */
        if (path->device->inq_flags & SID_CmdQue
         || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
                printf("%s%d: Command Queueing enabled\n",
                       periph->periph_name, periph->unit_number);
        }
        /* Announce the caller's details if any were passed in. */
        if (announce_string != NULL)
                printf("%s%d: %s\n", periph->periph_name,
                       periph->unit_number, announce_string);
}
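
/*
 * For reference, the announcement above typically renders like this at
 * boot (a representative example, not output captured from this code):
 *
 *      da0 at ahc0 bus 0 scbus0 target 0 lun 0
 *      da0: <SEAGATE ST39102LW 0004> Fixed Direct Access SCSI-2 device
 *      da0: Command Queueing enabled
 */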

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
            struct cam_eb *bus)
{
        dev_match_ret retval;
        int i;

        retval = DM_RET_NONE;

        /*
         * If we aren't given something to match against, that's an error.
         */
        if (bus == NULL)
                return(DM_RET_ERROR);

        /*
         * If there are no match entries, then this bus matches no
         * matter what.
         */
        if ((patterns == NULL) || (num_patterns == 0))
                return(DM_RET_DESCEND | DM_RET_COPY);

        for (i = 0; i < num_patterns; i++) {
                struct bus_match_pattern *cur_pattern;

                /*
                 * If the pattern in question isn't for a bus node, we
                 * aren't interested.  However, we do indicate to the
                 * calling routine that we should continue descending the
                 * tree, since the user wants to match against lower-level
                 * EDT elements.
                 */
                if (patterns[i].type != DEV_MATCH_BUS) {
                        if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
                                retval |= DM_RET_DESCEND;
                        continue;
                }

                cur_pattern = &patterns[i].pattern.bus_pattern;

                /*
                 * If they want to match any bus node, this one qualifies.
                 */
                if (cur_pattern->flags == BUS_MATCH_ANY) {
                        /* set the copy flag */
                        retval |= DM_RET_COPY;

                        /*
                         * If we've already decided on an action, go ahead
                         * and return.
                         */
                        if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
                                return(retval);
                }

                /*
                 * Not sure why someone would do this...
                 */
                if (cur_pattern->flags == BUS_MATCH_NONE)
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
                 && (cur_pattern->path_id != bus->path_id))
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
                 && (cur_pattern->bus_id != bus->sim->bus_id))
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
                 && (cur_pattern->unit_number != bus->sim->unit_number))
                        continue;

                if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
                 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
                             DEV_IDLEN) != 0))
                        continue;

                /*
                 * If we get to this point, the user definitely wants
                 * information on this bus.  So tell the caller to copy the
                 * data out.
                 */
                retval |= DM_RET_COPY;

                /*
                 * If the return action has been set to descend, then we
                 * know that we've already seen a non-bus matching
                 * expression, therefore we need to further descend the tree.
                 * This won't change by continuing around the loop, so we
                 * go ahead and return.  If we haven't seen a non-bus
                 * matching expression, we keep going around the loop until
                 * we exhaust the matching expressions.  We'll set the stop
                 * flag once we fall out of the loop.
                 */
                if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
                        return(retval);
        }

        /*
         * If the return action hasn't been set to descend yet, that means
         * we haven't seen anything other than bus matching patterns.  So
         * tell the caller to stop descending the tree -- the user doesn't
         * want to match against lower level tree elements.
         */
        if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
                retval |= DM_RET_STOP;

        return(retval);
}
 1187 
 1188 static dev_match_ret
 1189 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1190                struct cam_ed *device)
 1191 {
 1192         dev_match_ret retval;
 1193         int i;
 1194 
 1195         retval = DM_RET_NONE;
 1196 
 1197         /*
 1198          * If we aren't given something to match against, that's an error.
 1199          */
 1200         if (device == NULL)
 1201                 return(DM_RET_ERROR);
 1202 
 1203         /*
 1204          * If there are no match entries, then this device matches no
 1205          * matter what.
 1206          */
 1207         if ((patterns == NULL) || (num_patterns == 0))
 1208                 return(DM_RET_DESCEND | DM_RET_COPY);
 1209 
 1210         for (i = 0; i < num_patterns; i++) {
 1211                 struct device_match_pattern *cur_pattern;
 1212 
 1213                 /*
 1214                  * If the pattern in question isn't for a device node, we
 1215                  * aren't interested.
 1216                  */
 1217                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1218                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1219                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1220                                 retval |= DM_RET_DESCEND;
 1221                         continue;
 1222                 }
 1223 
 1224                 cur_pattern = &patterns[i].pattern.device_pattern;
 1225 
 1226                 /*
 1227                  * If they want to match any device node, we give them any
 1228                  * device node.
 1229                  */
 1230                 if (cur_pattern->flags == DEV_MATCH_ANY) {
 1231                         /* set the copy flag */
 1232                         retval |= DM_RET_COPY;
 1233 
 1234 
 1235                         /*
 1236                          * If we've already decided on an action, go ahead
 1237                          * and return.
 1238                          */
 1239                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1240                                 return(retval);
 1241                 }
 1242 
 1243                 /*
 1244                  * Not sure why someone would do this...
 1245                  */
 1246                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1247                         continue;
 1248 
 1249                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1250                  && (cur_pattern->path_id != device->target->bus->path_id))
 1251                         continue;
 1252 
 1253                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1254                  && (cur_pattern->target_id != device->target->target_id))
 1255                         continue;
 1256 
 1257                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1258                  && (cur_pattern->target_lun != device->lun_id))
 1259                         continue;
 1260 
 1261                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1262                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1263                                     (caddr_t)&cur_pattern->inq_pat,
 1264                                     1, sizeof(cur_pattern->inq_pat),
 1265                                     scsi_static_inquiry_match) == NULL))
 1266                         continue;
 1267 
 1268                 /*
 1269                  * If we get to this point, the user definitely wants
 1270                  * information on this device.  So tell the caller to copy
 1271                  * the data out.
 1272                  */
 1273                 retval |= DM_RET_COPY;
 1274 
 1275                 /*
 1276                  * If the return action has been set to descend, then we
 1277                  * know that we've already seen a peripheral matching
 1278                  * expression, therefore we need to further descend the tree.
 1279                  * This won't change by continuing around the loop, so we
 1280                  * go ahead and return.  If we haven't seen a peripheral
 1281                  * matching expression, we keep going around the loop until
 1282                  * we exhaust the matching expressions.  We'll set the stop
 1283                  * flag once we fall out of the loop.
 1284                  */
 1285                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1286                         return(retval);
 1287         }
 1288 
 1289         /*
 1290          * If the return action hasn't been set to descend yet, that means
 1291          * we haven't seen any peripheral matching patterns.  So tell the
 1292          * caller to stop descending the tree -- the user doesn't want to
 1293          * match against lower level tree elements.
 1294          */
 1295         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1296                 retval |= DM_RET_STOP;
 1297 
 1298         return(retval);
 1299 }
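
      /*
       * A minimal sketch, not part of the original source, of how a caller
       * could build a device pattern for the matcher above.  The flags
       * select which fields take part in the comparison; any field left
       * out acts as a wildcard.  The values are arbitrary examples:
       *
       *         struct dev_match_pattern p;
       *
       *         p.type = DEV_MATCH_DEVICE;
       *         p.pattern.device_pattern.flags = DEV_MATCH_PATH | DEV_MATCH_TARGET;
       *         p.pattern.device_pattern.path_id = 0;
       *         p.pattern.device_pattern.target_id = 1;
       *
       * A device at bus 0, target 1 (any lun) then comes back with
       * DM_RET_COPY set, plus DM_RET_STOP once the loop falls through,
       * since no peripheral pattern was supplied to force a descend.
       */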
 1300 
 1301 /*
 1302  * Match a single peripheral against any number of match patterns.
 1303  */
 1304 static dev_match_ret
 1305 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1306                struct cam_periph *periph)
 1307 {
 1308         dev_match_ret retval;
 1309         int i;
 1310 
 1311         /*
 1312          * If we aren't given something to match against, that's an error.
 1313          */
 1314         if (periph == NULL)
 1315                 return(DM_RET_ERROR);
 1316 
 1317         /*
 1318          * If there are no match entries, then this peripheral matches no
 1319          * matter what.
 1320          */
 1321         if ((patterns == NULL) || (num_patterns == 0))
 1322                 return(DM_RET_STOP | DM_RET_COPY);
 1323 
 1324         /*
 1325          * There aren't any nodes below a peripheral node, so there's no
 1326          * reason to descend the tree any further.
 1327          */
 1328         retval = DM_RET_STOP;
 1329 
 1330         for (i = 0; i < num_patterns; i++) {
 1331                 struct periph_match_pattern *cur_pattern;
 1332 
 1333                 /*
 1334                  * If the pattern in question isn't for a peripheral, we
 1335                  * aren't interested.
 1336                  */
 1337                 if (patterns[i].type != DEV_MATCH_PERIPH)
 1338                         continue;
 1339 
 1340                 cur_pattern = &patterns[i].pattern.periph_pattern;
 1341 
 1342                 /*
 1343                  * If they want to match on anything, then we will do so.
 1344                  */
 1345                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 1346                         /* set the copy flag */
 1347                         retval |= DM_RET_COPY;
 1348 
 1349                         /*
 1350                          * We've already set the return action to stop,
 1351                          * since there are no nodes below peripherals in
 1352                          * the tree.
 1353                          */
 1354                         return(retval);
 1355                 }
 1356 
 1357                 /*
 1358                  * Not sure why someone would do this...
 1359                  */
 1360                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 1361                         continue;
 1362 
 1363                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 1364                  && (cur_pattern->path_id != periph->path->bus->path_id))
 1365                         continue;
 1366 
 1367                 /*
 1368                  * For the target and lun IDs, we have to make sure the
 1369                  * target and lun pointers aren't NULL.  The xpt peripheral
 1370                  * has a wildcard target and device.
 1371                  */
 1372                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 1373                  && ((periph->path->target == NULL)
 1374                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 1375                         continue;
 1376 
 1377                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 1378                  && ((periph->path->device == NULL)
 1379                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 1380                         continue;
 1381 
 1382                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 1383                  && (cur_pattern->unit_number != periph->unit_number))
 1384                         continue;
 1385 
 1386                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 1387                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 1388                              DEV_IDLEN) != 0))
 1389                         continue;
 1390 
 1391                 /*
 1392                  * If we get to this point, the user definitely wants
 1393                  * information on this peripheral.  So tell the caller to
 1394                  * copy the data out.
 1395                  */
 1396                 retval |= DM_RET_COPY;
 1397 
 1398                 /*
 1399                  * The return action has already been set to stop, since
 1400                  * peripherals don't have any nodes below them in the EDT.
 1401                  */
 1402                 return(retval);
 1403         }
 1404 
 1405         /*
 1406          * If we get to this point, the peripheral that was passed in
 1407          * doesn't match any of the patterns.
 1408          */
 1409         return(retval);
 1410 }
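
      /*
       * Likewise, a hedged sketch of a peripheral pattern: this one would
       * select every "da" peripheral regardless of location or unit
       * number.  The strncpy() mirrors the DEV_IDLEN-bounded compare
       * above:
       *
       *         struct dev_match_pattern p;
       *
       *         p.type = DEV_MATCH_PERIPH;
       *         p.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
       *         strncpy(p.pattern.periph_pattern.periph_name, "da", DEV_IDLEN);
       */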
 1411 
 1412 static int
 1413 xptedtbusfunc(struct cam_eb *bus, void *arg)
 1414 {
 1415         struct ccb_dev_match *cdm;
 1416         dev_match_ret retval;
 1417 
 1418         cdm = (struct ccb_dev_match *)arg;
 1419 
 1420         /*
 1421          * If our position is for something deeper in the tree, that means
 1422          * that we've already seen this node.  So, we keep going down.
 1423          */
 1424         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1425          && (cdm->pos.cookie.bus == bus)
 1426          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1427          && (cdm->pos.cookie.target != NULL))
 1428                 retval = DM_RET_DESCEND;
 1429         else
 1430                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 1431 
 1432         /*
 1433          * If we got an error, bail out of the search.
 1434          */
 1435         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1436                 cdm->status = CAM_DEV_MATCH_ERROR;
 1437                 return(0);
 1438         }
 1439 
 1440         /*
 1441          * If the copy flag is set, copy this bus out.
 1442          */
 1443         if (retval & DM_RET_COPY) {
 1444                 int spaceleft, j;
 1445 
 1446                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1447                         sizeof(struct dev_match_result));
 1448 
 1449                 /*
 1450                  * If we don't have enough space to put in another
 1451                  * match result, save our position and tell the
 1452                  * user there are more devices to check.
 1453                  */
 1454                 if (spaceleft < sizeof(struct dev_match_result)) {
 1455                         bzero(&cdm->pos, sizeof(cdm->pos));
 1456                         cdm->pos.position_type =
 1457                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 1458 
 1459                         cdm->pos.cookie.bus = bus;
 1460                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1461                                 xsoftc.bus_generation;
 1462                         cdm->status = CAM_DEV_MATCH_MORE;
 1463                         return(0);
 1464                 }
 1465                 j = cdm->num_matches;
 1466                 cdm->num_matches++;
 1467                 cdm->matches[j].type = DEV_MATCH_BUS;
 1468                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 1469                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 1470                 cdm->matches[j].result.bus_result.unit_number =
 1471                         bus->sim->unit_number;
 1472                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 1473                         bus->sim->sim_name, DEV_IDLEN);
 1474         }
 1475 
 1476         /*
 1477          * If the user is only interested in busses, there's no
 1478          * reason to descend to the next level in the tree.
 1479          */
 1480         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1481                 return(1);
 1482 
 1483         /*
 1484          * If there is a target generation recorded, check it to
 1485          * make sure the target list hasn't changed.
 1486          */
 1487         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1488          && (bus == cdm->pos.cookie.bus)
 1489          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1490          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 1491          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 1492              bus->generation)) {
 1493                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1494                 return(0);
 1495         }
 1496 
 1497         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1498          && (cdm->pos.cookie.bus == bus)
 1499          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1500          && (cdm->pos.cookie.target != NULL))
 1501                 return(xpttargettraverse(bus,
 1502                                         (struct cam_et *)cdm->pos.cookie.target,
 1503                                          xptedttargetfunc, arg));
 1504         else
 1505                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 1506 }
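
      /*
       * A worked example of the resume protocol above, added for
       * illustration: if the caller's buffer holds four results and a
       * fifth bus matches, that bus is recorded in cdm->pos.cookie.bus
       * together with the current bus generation, and the CCB completes
       * with CAM_DEV_MATCH_MORE.  On the next submission the walk resumes
       * at the saved cookie; if the generation count no longer matches,
       * the caller sees CAM_DEV_MATCH_LIST_CHANGED instead of a walk over
       * a possibly stale pointer.
       */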
 1507 
 1508 static int
 1509 xptedttargetfunc(struct cam_et *target, void *arg)
 1510 {
 1511         struct ccb_dev_match *cdm;
 1512 
 1513         cdm = (struct ccb_dev_match *)arg;
 1514 
 1515         /*
 1516          * If there is a device list generation recorded, check it to
 1517          * make sure the device list hasn't changed.
 1518          */
 1519         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1520          && (cdm->pos.cookie.bus == target->bus)
 1521          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1522          && (cdm->pos.cookie.target == target)
 1523          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1524          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 1525          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 1526              target->generation)) {
 1527                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1528                 return(0);
 1529         }
 1530 
 1531         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1532          && (cdm->pos.cookie.bus == target->bus)
 1533          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1534          && (cdm->pos.cookie.target == target)
 1535          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1536          && (cdm->pos.cookie.device != NULL))
 1537                 return(xptdevicetraverse(target,
 1538                                         (struct cam_ed *)cdm->pos.cookie.device,
 1539                                          xptedtdevicefunc, arg));
 1540         else
 1541                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 1542 }
 1543 
 1544 static int
 1545 xptedtdevicefunc(struct cam_ed *device, void *arg)
 1546 {
 1547 
 1548         struct ccb_dev_match *cdm;
 1549         dev_match_ret retval;
 1550 
 1551         cdm = (struct ccb_dev_match *)arg;
 1552 
 1553         /*
 1554          * If our position is for something deeper in the tree, that means
 1555          * that we've already seen this node.  So, we keep going down.
 1556          */
 1557         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1558          && (cdm->pos.cookie.device == device)
 1559          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1560          && (cdm->pos.cookie.periph != NULL))
 1561                 retval = DM_RET_DESCEND;
 1562         else
 1563                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 1564                                         device);
 1565 
 1566         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1567                 cdm->status = CAM_DEV_MATCH_ERROR;
 1568                 return(0);
 1569         }
 1570 
 1571         /*
 1572          * If the copy flag is set, copy this device out.
 1573          */
 1574         if (retval & DM_RET_COPY) {
 1575                 int spaceleft, j;
 1576 
 1577                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1578                         sizeof(struct dev_match_result));
 1579 
 1580                 /*
 1581                  * If we don't have enough space to put in another
 1582                  * match result, save our position and tell the
 1583                  * user there are more devices to check.
 1584                  */
 1585                 if (spaceleft < sizeof(struct dev_match_result)) {
 1586                         bzero(&cdm->pos, sizeof(cdm->pos));
 1587                         cdm->pos.position_type =
 1588                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1589                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 1590 
 1591                         cdm->pos.cookie.bus = device->target->bus;
 1592                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1593                                 xsoftc.bus_generation;
 1594                         cdm->pos.cookie.target = device->target;
 1595                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1596                                 device->target->bus->generation;
 1597                         cdm->pos.cookie.device = device;
 1598                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1599                                 device->target->generation;
 1600                         cdm->status = CAM_DEV_MATCH_MORE;
 1601                         return(0);
 1602                 }
 1603                 j = cdm->num_matches;
 1604                 cdm->num_matches++;
 1605                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 1606                 cdm->matches[j].result.device_result.path_id =
 1607                         device->target->bus->path_id;
 1608                 cdm->matches[j].result.device_result.target_id =
 1609                         device->target->target_id;
 1610                 cdm->matches[j].result.device_result.target_lun =
 1611                         device->lun_id;
 1612                 cdm->matches[j].result.device_result.protocol =
 1613                         device->protocol;
 1614                 bcopy(&device->inq_data,
 1615                       &cdm->matches[j].result.device_result.inq_data,
 1616                       sizeof(struct scsi_inquiry_data));
 1617                 bcopy(&device->ident_data,
 1618                       &cdm->matches[j].result.device_result.ident_data,
 1619                       sizeof(struct ata_params));
 1620 
 1621                 /* Let the user know whether this device is unconfigured */
 1622                 if (device->flags & CAM_DEV_UNCONFIGURED)
 1623                         cdm->matches[j].result.device_result.flags =
 1624                                 DEV_RESULT_UNCONFIGURED;
 1625                 else
 1626                         cdm->matches[j].result.device_result.flags =
 1627                                 DEV_RESULT_NOFLAG;
 1628         }
 1629 
 1630         /*
 1631          * If the user isn't interested in peripherals, don't descend
 1632          * the tree any further.
 1633          */
 1634         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1635                 return(1);
 1636 
 1637         /*
 1638          * If there is a peripheral list generation recorded, make sure
 1639          * it hasn't changed.
 1640          */
 1641         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1642          && (device->target->bus == cdm->pos.cookie.bus)
 1643          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1644          && (device->target == cdm->pos.cookie.target)
 1645          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1646          && (device == cdm->pos.cookie.device)
 1647          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1648          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 1649          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1650              device->generation)){
 1651                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1652                 return(0);
 1653         }
 1654 
 1655         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1656          && (cdm->pos.cookie.bus == device->target->bus)
 1657          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1658          && (cdm->pos.cookie.target == device->target)
 1659          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1660          && (cdm->pos.cookie.device == device)
 1661          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1662          && (cdm->pos.cookie.periph != NULL))
 1663                 return(xptperiphtraverse(device,
 1664                                 (struct cam_periph *)cdm->pos.cookie.periph,
 1665                                 xptedtperiphfunc, arg));
 1666         else
 1667                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 1668 }
 1669 
 1670 static int
 1671 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 1672 {
 1673         struct ccb_dev_match *cdm;
 1674         dev_match_ret retval;
 1675 
 1676         cdm = (struct ccb_dev_match *)arg;
 1677 
 1678         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1679 
 1680         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1681                 cdm->status = CAM_DEV_MATCH_ERROR;
 1682                 return(0);
 1683         }
 1684 
 1685         /*
 1686          * If the copy flag is set, copy this peripheral out.
 1687          */
 1688         if (retval & DM_RET_COPY) {
 1689                 int spaceleft, j;
 1690 
 1691                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1692                         sizeof(struct dev_match_result));
 1693 
 1694                 /*
 1695                  * If we don't have enough space to put in another
 1696                  * match result, save our position and tell the
 1697                  * user there are more devices to check.
 1698                  */
 1699                 if (spaceleft < sizeof(struct dev_match_result)) {
 1700                         bzero(&cdm->pos, sizeof(cdm->pos));
 1701                         cdm->pos.position_type =
 1702                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1703                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 1704                                 CAM_DEV_POS_PERIPH;
 1705 
 1706                         cdm->pos.cookie.bus = periph->path->bus;
 1707                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1708                                 xsoftc.bus_generation;
 1709                         cdm->pos.cookie.target = periph->path->target;
 1710                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1711                                 periph->path->bus->generation;
 1712                         cdm->pos.cookie.device = periph->path->device;
 1713                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1714                                 periph->path->target->generation;
 1715                         cdm->pos.cookie.periph = periph;
 1716                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1717                                 periph->path->device->generation;
 1718                         cdm->status = CAM_DEV_MATCH_MORE;
 1719                         return(0);
 1720                 }
 1721 
 1722                 j = cdm->num_matches;
 1723                 cdm->num_matches++;
 1724                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1725                 cdm->matches[j].result.periph_result.path_id =
 1726                         periph->path->bus->path_id;
 1727                 cdm->matches[j].result.periph_result.target_id =
 1728                         periph->path->target->target_id;
 1729                 cdm->matches[j].result.periph_result.target_lun =
 1730                         periph->path->device->lun_id;
 1731                 cdm->matches[j].result.periph_result.unit_number =
 1732                         periph->unit_number;
 1733                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1734                         periph->periph_name, DEV_IDLEN);
 1735         }
 1736 
 1737         return(1);
 1738 }
 1739 
 1740 static int
 1741 xptedtmatch(struct ccb_dev_match *cdm)
 1742 {
 1743         int ret;
 1744 
 1745         cdm->num_matches = 0;
 1746 
 1747         /*
 1748          * Check the bus list generation.  If it has changed, the user
 1749          * needs to reset everything and start over.
 1750          */
 1751         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1752          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 1753          && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
 1754                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1755                 return(0);
 1756         }
 1757 
 1758         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1759          && (cdm->pos.cookie.bus != NULL))
 1760                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 1761                                      xptedtbusfunc, cdm);
 1762         else
 1763                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 1764 
 1765         /*
 1766          * If we get back 0, that means that we had to stop before fully
 1767          * traversing the EDT.  It also means that one of the subroutines
 1768          * has set the status field to the proper value.  If we get back 1,
 1769          * we've fully traversed the EDT and copied out any matching entries.
 1770          */
 1771         if (ret == 1)
 1772                 cdm->status = CAM_DEV_MATCH_LAST;
 1773 
 1774         return(ret);
 1775 }
 1776 
 1777 static int
 1778 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 1779 {
 1780         struct ccb_dev_match *cdm;
 1781 
 1782         cdm = (struct ccb_dev_match *)arg;
 1783 
 1784         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1785          && (cdm->pos.cookie.pdrv == pdrv)
 1786          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1787          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 1788          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1789              (*pdrv)->generation)) {
 1790                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1791                 return(0);
 1792         }
 1793 
 1794         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1795          && (cdm->pos.cookie.pdrv == pdrv)
 1796          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1797          && (cdm->pos.cookie.periph != NULL))
 1798                 return(xptpdperiphtraverse(pdrv,
 1799                                 (struct cam_periph *)cdm->pos.cookie.periph,
 1800                                 xptplistperiphfunc, arg));
 1801         else
 1802                 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
 1803 }
 1804 
 1805 static int
 1806 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 1807 {
 1808         struct ccb_dev_match *cdm;
 1809         dev_match_ret retval;
 1810 
 1811         cdm = (struct ccb_dev_match *)arg;
 1812 
 1813         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1814 
 1815         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1816                 cdm->status = CAM_DEV_MATCH_ERROR;
 1817                 return(0);
 1818         }
 1819 
 1820         /*
 1821          * If the copy flag is set, copy this peripheral out.
 1822          */
 1823         if (retval & DM_RET_COPY) {
 1824                 int spaceleft, j;
 1825 
 1826                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1827                         sizeof(struct dev_match_result));
 1828 
 1829                 /*
 1830                  * If we don't have enough space to put in another
 1831                  * match result, save our position and tell the
 1832                  * user there are more devices to check.
 1833                  */
 1834                 if (spaceleft < sizeof(struct dev_match_result)) {
 1835                         struct periph_driver **pdrv;
 1836 
 1837                         pdrv = NULL;
 1838                         bzero(&cdm->pos, sizeof(cdm->pos));
 1839                         cdm->pos.position_type =
 1840                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 1841                                 CAM_DEV_POS_PERIPH;
 1842 
 1843                         /*
 1844                          * This may look a bit nonsensical, but it is
 1845                          * actually quite logical.  There are very few
 1846                          * peripheral drivers, and bloating every peripheral
 1847                          * structure with a pointer back to its parent
 1848                          * peripheral driver linker set entry would cost
 1849                          * more in the long run than doing this quick lookup.
 1850                          */
 1851                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 1852                                 if (strcmp((*pdrv)->driver_name,
 1853                                     periph->periph_name) == 0)
 1854                                         break;
 1855                         }
 1856 
 1857                         if (*pdrv == NULL) {
 1858                                 cdm->status = CAM_DEV_MATCH_ERROR;
 1859                                 return(0);
 1860                         }
 1861 
 1862                         cdm->pos.cookie.pdrv = pdrv;
 1863                         /*
 1864                          * The periph generation slot does double duty, as
 1865                          * does the periph pointer slot.  They are used for
 1866                          * both edt and pdrv lookups and positioning.
 1867                          */
 1868                         cdm->pos.cookie.periph = periph;
 1869                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1870                                 (*pdrv)->generation;
 1871                         cdm->status = CAM_DEV_MATCH_MORE;
 1872                         return(0);
 1873                 }
 1874 
 1875                 j = cdm->num_matches;
 1876                 cdm->num_matches++;
 1877                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1878                 cdm->matches[j].result.periph_result.path_id =
 1879                         periph->path->bus->path_id;
 1880 
 1881                 /*
 1882                  * The transport layer peripheral doesn't have a target or
 1883                  * lun.
 1884                  */
 1885                 if (periph->path->target)
 1886                         cdm->matches[j].result.periph_result.target_id =
 1887                                 periph->path->target->target_id;
 1888                 else
 1889                         cdm->matches[j].result.periph_result.target_id = -1;
 1890 
 1891                 if (periph->path->device)
 1892                         cdm->matches[j].result.periph_result.target_lun =
 1893                                 periph->path->device->lun_id;
 1894                 else
 1895                         cdm->matches[j].result.periph_result.target_lun = -1;
 1896 
 1897                 cdm->matches[j].result.periph_result.unit_number =
 1898                         periph->unit_number;
 1899                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1900                         periph->periph_name, DEV_IDLEN);
 1901         }
 1902 
 1903         return(1);
 1904 }
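
      /*
       * A hedged sketch of how an entry lands in the periph_drivers
       * linker set consulted above.  The driver name "foo" and the
       * identifiers around it are hypothetical, but the shape follows the
       * usual peripheral driver registration:
       *
       *         static struct periph_driver foodriver =
       *         {
       *                 fooinit, "foo",
       *                 TAILQ_HEAD_INITIALIZER(foodriver.units), 0
       *         };
       *         PERIPHDRIVER_DECLARE(foo, foodriver);
       *
       * Because registration happens at link time, the set walked by
       * xptpdrvtraverse() below cannot change while the system is running.
       */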
 1905 
 1906 static int
 1907 xptperiphlistmatch(struct ccb_dev_match *cdm)
 1908 {
 1909         int ret;
 1910 
 1911         cdm->num_matches = 0;
 1912 
 1913         /*
 1914          * At the corresponding point in the EDT traversal function, we check
 1915          * the bus list generation to make sure that no busses have been added
 1916          * or removed since the user last sent an XPT_DEV_MATCH ccb through.
 1917          * For the peripheral driver list traversal function, however, we
 1918          * don't have to worry about new peripheral driver types coming or
 1919          * going; they're in a linker set, and therefore can't change
 1920          * without a recompile.
 1921          */
 1922 
 1923         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1924          && (cdm->pos.cookie.pdrv != NULL))
 1925                 ret = xptpdrvtraverse(
 1926                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 1927                                 xptplistpdrvfunc, cdm);
 1928         else
 1929                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 1930 
 1931         /*
 1932          * If we get back 0, that means that we had to stop before fully
 1933          * traversing the peripheral driver tree.  It also means that one of
 1934          * the subroutines has set the status field to the proper value.  If
 1935          * we get back 1, we've fully traversed the EDT and copied out any
 1936          * matching entries.
 1937          */
 1938         if (ret == 1)
 1939                 cdm->status = CAM_DEV_MATCH_LAST;
 1940 
 1941         return(ret);
 1942 }
 1943 
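      /*
       * Note on the locking below: xpt_topo_lock protects only the bus
       * list, so it is dropped around each tr_func() call and the
       * callback runs under the per-SIM lock instead.  Sampling next_bus
       * while the topology lock is still held keeps the walk valid even
       * if the callback removes the current bus from the list.
       */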
 1944 static int
 1945 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 1946 {
 1947         struct cam_eb *bus, *next_bus;
 1948         int retval;
 1949 
 1950         retval = 1;
 1951 
 1952         mtx_lock(&xsoftc.xpt_topo_lock);
 1953         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
 1954              bus != NULL;
 1955              bus = next_bus) {
 1956                 next_bus = TAILQ_NEXT(bus, links);
 1957 
 1958                 mtx_unlock(&xsoftc.xpt_topo_lock);
 1959                 CAM_SIM_LOCK(bus->sim);
 1960                 retval = tr_func(bus, arg);
 1961                 CAM_SIM_UNLOCK(bus->sim);
 1962                 if (retval == 0)
 1963                         return(retval);
 1964                 mtx_lock(&xsoftc.xpt_topo_lock);
 1965         }
 1966         mtx_unlock(&xsoftc.xpt_topo_lock);
 1967 
 1968         return(retval);
 1969 }
 1970 
 1971 int
 1972 xpt_sim_opened(struct cam_sim *sim)
 1973 {
 1974         struct cam_eb *bus;
 1975         struct cam_et *target;
 1976         struct cam_ed *device;
 1977         struct cam_periph *periph;
 1978 
 1979         KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
 1980         mtx_assert(sim->mtx, MA_OWNED);
 1981 
 1982         mtx_lock(&xsoftc.xpt_topo_lock);
 1983         TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
 1984                 if (bus->sim != sim)
 1985                         continue;
 1986 
 1987                 TAILQ_FOREACH(target, &bus->et_entries, links) {
 1988                         TAILQ_FOREACH(device, &target->ed_entries, links) {
 1989                                 SLIST_FOREACH(periph, &device->periphs,
 1990                                     periph_links) {
 1991                                         if (periph->refcount > 0) {
 1992                                                 mtx_unlock(&xsoftc.xpt_topo_lock);
 1993                                                 return (1);
 1994                                         }
 1995                                 }
 1996                         }
 1997                 }
 1998         }
 1999 
 2000         mtx_unlock(&xsoftc.xpt_topo_lock);
 2001         return (0);
 2002 }
 2003 
 2004 static int
 2005 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2006                   xpt_targetfunc_t *tr_func, void *arg)
 2007 {
 2008         struct cam_et *target, *next_target;
 2009         int retval;
 2010 
 2011         retval = 1;
 2012         for (target = (start_target ? start_target :
 2013                        TAILQ_FIRST(&bus->et_entries));
 2014              target != NULL; target = next_target) {
 2015 
 2016                 next_target = TAILQ_NEXT(target, links);
 2017 
 2018                 retval = tr_func(target, arg);
 2019 
 2020                 if (retval == 0)
 2021                         return(retval);
 2022         }
 2023 
 2024         return(retval);
 2025 }
 2026 
 2027 static int
 2028 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2029                   xpt_devicefunc_t *tr_func, void *arg)
 2030 {
 2031         struct cam_ed *device, *next_device;
 2032         int retval;
 2033 
 2034         retval = 1;
 2035         for (device = (start_device ? start_device :
 2036                        TAILQ_FIRST(&target->ed_entries));
 2037              device != NULL;
 2038              device = next_device) {
 2039 
 2040                 next_device = TAILQ_NEXT(device, links);
 2041 
 2042                 retval = tr_func(device, arg);
 2043 
 2044                 if (retval == 0)
 2045                         return(retval);
 2046         }
 2047 
 2048         return(retval);
 2049 }
 2050 
 2051 static int
 2052 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2053                   xpt_periphfunc_t *tr_func, void *arg)
 2054 {
 2055         struct cam_periph *periph, *next_periph;
 2056         int retval;
 2057 
 2058         retval = 1;
 2059 
 2060         for (periph = (start_periph ? start_periph :
 2061                        SLIST_FIRST(&device->periphs));
 2062              periph != NULL;
 2063              periph = next_periph) {
 2064 
 2065                 next_periph = SLIST_NEXT(periph, periph_links);
 2066 
 2067                 retval = tr_func(periph, arg);
 2068                 if (retval == 0)
 2069                         return(retval);
 2070         }
 2071 
 2072         return(retval);
 2073 }
 2074 
 2075 static int
 2076 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2077                 xpt_pdrvfunc_t *tr_func, void *arg)
 2078 {
 2079         struct periph_driver **pdrv;
 2080         int retval;
 2081 
 2082         retval = 1;
 2083 
 2084         /*
 2085          * We don't traverse the peripheral driver list like we do the
 2086          * other lists, because it is a linker set, and therefore cannot be
 2087          * changed during runtime.  If the peripheral driver list is ever
 2088          * redone to be something other than a linker set (i.e. it can
 2089          * change while the system is running), the list traversal should
 2090          * be modified to work like the other traversal functions.
 2091          */
 2092         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2093              *pdrv != NULL; pdrv++) {
 2094                 retval = tr_func(pdrv, arg);
 2095 
 2096                 if (retval == 0)
 2097                         return(retval);
 2098         }
 2099 
 2100         return(retval);
 2101 }
 2102 
 2103 static int
 2104 xptpdperiphtraverse(struct periph_driver **pdrv,
 2105                     struct cam_periph *start_periph,
 2106                     xpt_periphfunc_t *tr_func, void *arg)
 2107 {
 2108         struct cam_periph *periph, *next_periph;
 2109         int retval;
 2110 
 2111         retval = 1;
 2112 
 2113         xpt_lock_buses();
 2114         for (periph = (start_periph ? start_periph :
 2115              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2116              periph = next_periph) {
 2117 
 2118                 next_periph = TAILQ_NEXT(periph, unit_links);
 2119 
 2120                 retval = tr_func(periph, arg);
 2121                 if (retval == 0) {
 2122                         xpt_unlock_buses();
 2123                         return(retval);
 2124                 }
 2125         }
 2126         xpt_unlock_buses();
 2127         return(retval);
 2128 }
 2129 
 2130 static int
 2131 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2132 {
 2133         struct xpt_traverse_config *tr_config;
 2134 
 2135         tr_config = (struct xpt_traverse_config *)arg;
 2136 
 2137         if (tr_config->depth == XPT_DEPTH_BUS) {
 2138                 xpt_busfunc_t *tr_func;
 2139 
 2140                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2141 
 2142                 return(tr_func(bus, tr_config->tr_arg));
 2143         } else
 2144                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2145 }
 2146 
 2147 static int
 2148 xptdeftargetfunc(struct cam_et *target, void *arg)
 2149 {
 2150         struct xpt_traverse_config *tr_config;
 2151 
 2152         tr_config = (struct xpt_traverse_config *)arg;
 2153 
 2154         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2155                 xpt_targetfunc_t *tr_func;
 2156 
 2157                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2158 
 2159                 return(tr_func(target, tr_config->tr_arg));
 2160         } else
 2161                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2162 }
 2163 
 2164 static int
 2165 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2166 {
 2167         struct xpt_traverse_config *tr_config;
 2168 
 2169         tr_config = (struct xpt_traverse_config *)arg;
 2170 
 2171         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2172                 xpt_devicefunc_t *tr_func;
 2173 
 2174                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2175 
 2176                 return(tr_func(device, tr_config->tr_arg));
 2177         } else
 2178                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2179 }
 2180 
 2181 static int
 2182 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2183 {
 2184         struct xpt_traverse_config *tr_config;
 2185         xpt_periphfunc_t *tr_func;
 2186 
 2187         tr_config = (struct xpt_traverse_config *)arg;
 2188 
 2189         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2190 
 2191         /*
 2192          * Unlike the other default functions, we don't check for depth
 2193          * here.  The peripheral driver level is the last level in the EDT,
 2194          * so if we're here, we should execute the function in question.
 2195          */
 2196         return(tr_func(periph, tr_config->tr_arg));
 2197 }
 2198 
 2199 /*
 2200  * Execute the given function for every bus in the EDT.
 2201  */
 2202 static int
 2203 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2204 {
 2205         struct xpt_traverse_config tr_config;
 2206 
 2207         tr_config.depth = XPT_DEPTH_BUS;
 2208         tr_config.tr_func = tr_func;
 2209         tr_config.tr_arg = arg;
 2210 
 2211         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2212 }
 2213 
 2214 /*
 2215  * Execute the given function for every device in the EDT.
 2216  */
 2217 static int
 2218 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2219 {
 2220         struct xpt_traverse_config tr_config;
 2221 
 2222         tr_config.depth = XPT_DEPTH_DEVICE;
 2223         tr_config.tr_func = tr_func;
 2224         tr_config.tr_arg = arg;
 2225 
 2226         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2227 }
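
      /*
       * A minimal sketch of a callback for the helpers above; the
       * function name and the counter argument are hypothetical.  Per the
       * traversal contract, returning nonzero continues the walk and
       * returning zero aborts it:
       *
       *         static int
       *         xptcountdevfunc(struct cam_ed *device, void *arg)
       *         {
       *                 int *count = (int *)arg;
       *
       *                 if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
       *                         (*count)++;
       *                 return (1);
       *         }
       *
       * which a caller would run via:
       *
       *         int n = 0;
       *
       *         xpt_for_all_devices(xptcountdevfunc, &n);
       */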
 2228 
 2229 static int
 2230 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2231 {
 2232         struct cam_path path;
 2233         struct ccb_getdev cgd;
 2234         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2235 
 2236         /*
 2237          * Don't report unconfigured devices (Wildcard devs,
 2238          * devices only for target mode, device instances
 2239          * that have been invalidated but are waiting for
 2240          * their last reference count to be released).
 2241          */
 2242         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2243                 return (1);
 2244 
 2245         xpt_compile_path(&path,
 2246                          NULL,
 2247                          device->target->bus->path_id,
 2248                          device->target->target_id,
 2249                          device->lun_id);
 2250         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2251         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2252         xpt_action((union ccb *)&cgd);
 2253         csa->callback(csa->callback_arg,
 2254                             AC_FOUND_DEVICE,
 2255                             &path, &cgd);
 2256         xpt_release_path(&path);
 2257 
 2258         return(1);
 2259 }
 2260 
 2261 static int
 2262 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2263 {
 2264         struct cam_path path;
 2265         struct ccb_pathinq cpi;
 2266         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2267 
 2268         xpt_compile_path(&path, /*periph*/NULL,
 2269                          bus->sim->path_id,
 2270                          CAM_TARGET_WILDCARD,
 2271                          CAM_LUN_WILDCARD);
 2272         xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2273         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2274         xpt_action((union ccb *)&cpi);
 2275         csa->callback(csa->callback_arg,
 2276                             AC_PATH_REGISTERED,
 2277                             &path, &cpi);
 2278         xpt_release_path(&path);
 2279 
 2280         return(1);
 2281 }
 2282 
 2283 void
 2284 xpt_action(union ccb *start_ccb)
 2285 {
 2286 
 2287         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2288 
 2289         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2290         /* Compatibility for RL-unaware code. */
 2291         if (CAM_PRIORITY_TO_RL(start_ccb->ccb_h.pinfo.priority) == 0)
 2292             start_ccb->ccb_h.pinfo.priority += CAM_PRIORITY_NORMAL - 1;
 2293         (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
 2294 }
 2295 
 2296 void
 2297 xpt_action_default(union ccb *start_ccb)
 2298 {
 2299 
 2300         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
 2301 
 2302         switch (start_ccb->ccb_h.func_code) {
 2303         case XPT_SCSI_IO:
 2304         {
 2305                 struct cam_ed *device;
 2306 #ifdef CAMDEBUG
 2307                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 2308                 struct cam_path *path;
 2309 
 2310                 path = start_ccb->ccb_h.path;
 2311 #endif
 2312 
 2313                 /*
 2314                  * For the sake of compatibility with SCSI-1
 2315                  * devices that may not understand the identify
 2316                  * message, we include lun information in the
 2317                  * second byte of all commands.  SCSI-1 specifies
 2318                  * that luns are a 3 bit value and reserves only 3
 2319                  * bits for lun information in the CDB.  Later
 2320                  * revisions of the SCSI spec allow for more than 8
 2321                  * luns, but have deprecated lun information in the
 2322                  * CDB.  So, if the lun won't fit, we must omit it.
 2323                  *
 2324                  * Also be aware that during initial probing for devices,
 2325                  * the inquiry information is unknown but initialized to 0.
 2326                  * This means that this code will be exercised while probing
 2327                  * devices with an ANSI revision greater than 2.
 2328                  */
 2329                 device = start_ccb->ccb_h.path->device;
 2330                 if (device->protocol_version <= SCSI_REV_2
 2331                  && start_ccb->ccb_h.target_lun < 8
 2332                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 2333 
 2334                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 2335                             start_ccb->ccb_h.target_lun << 5;
 2336                 }
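                      /*
                       * Worked example (illustrative): for target_lun 2 the
                       * OR above yields cdb_bytes[1] |= (2 << 5) == 0x40,
                       * placing the 3-bit lun in bits 7-5 of CDB byte 1,
                       * the lun field SCSI-1 reserved there.
                       */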
 2337                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 2338                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
 2339                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
 2340                                        &path->device->inq_data),
 2341                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
 2342                                           cdb_str, sizeof(cdb_str))));
 2343         }
 2344         /* FALLTHROUGH */
 2345         case XPT_TARGET_IO:
 2346         case XPT_CONT_TARGET_IO:
 2347                 start_ccb->csio.sense_resid = 0;
 2348                 start_ccb->csio.resid = 0;
 2349                 /* FALLTHROUGH */
 2350         case XPT_ATA_IO:
 2351                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) {
 2352                         start_ccb->ataio.resid = 0;
 2353                 }
 2354                 /* FALLTHROUGH */
 2355         case XPT_RESET_DEV:
 2356         case XPT_ENG_EXEC:
 2357         {
 2358                 struct cam_path *path = start_ccb->ccb_h.path;
 2359                 int frozen;
 2360 
 2361                 frozen = cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 2362                 path->device->sim->devq->alloc_openings += frozen;
 2363                 if (frozen > 0)
 2364                         xpt_run_dev_allocq(path->bus);
 2365                 if (xpt_schedule_dev_sendq(path->bus, path->device))
 2366                         xpt_run_dev_sendq(path->bus);
 2367                 break;
 2368         }
 2369         case XPT_CALC_GEOMETRY:
 2370         {
 2371                 struct cam_sim *sim;
 2372 
 2373                 /* Filter out garbage */
 2374                 if (start_ccb->ccg.block_size == 0
 2375                  || start_ccb->ccg.volume_size == 0) {
 2376                         start_ccb->ccg.cylinders = 0;
 2377                         start_ccb->ccg.heads = 0;
 2378                         start_ccb->ccg.secs_per_track = 0;
 2379                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2380                         break;
 2381                 }
 2382 #ifdef PC98
 2383                 /*
 2384                  * In a PC-98 system, geometry translation depends on
 2385                  * the "real" device geometry obtained from mode page 4.
 2386                  * SCSI geometry translation is performed in the
 2387                  * initialization routine of the SCSI BIOS and the result
 2388                  * stored in host memory.  If the translation is available
 2389                  * in host memory, use it.  If not, rely on the default
 2390                  * translation the device driver performs.
 2391                  */
 2392                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 2393                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2394                         break;
 2395                 }
 2396 #endif
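                      /*
                       * What happens next is SIM-specific.  As an
                       * illustrative example: for a 1 GB volume of 512-byte
                       * blocks (volume_size of 2097152), a SIM with no
                       * controller-specific translation commonly picks a
                       * fixed heads/sectors pair such as 64 and 32, giving
                       * cylinders = 2097152 / (64 * 32) = 1024.
                       */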
 2397                 sim = start_ccb->ccb_h.path->bus->sim;
 2398                 (*(sim->sim_action))(sim, start_ccb);
 2399                 break;
 2400         }
 2401         case XPT_ABORT:
 2402         {
 2403                 union ccb* abort_ccb;
 2404 
 2405                 abort_ccb = start_ccb->cab.abort_ccb;
 2406                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 2407 
 2408                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 2409                                 struct cam_ccbq *ccbq;
 2410                                 struct cam_ed *device;
 2411 
 2412                                 device = abort_ccb->ccb_h.path->device;
 2413                                 ccbq = &device->ccbq;
 2414                                 device->sim->devq->alloc_openings -= 
 2415                                     cam_ccbq_remove_ccb(ccbq, abort_ccb);
 2416                                 abort_ccb->ccb_h.status =
 2417                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2418                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2419                                 xpt_done(abort_ccb);
 2420                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2421                                 break;
 2422                         }
 2423                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 2424                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 2425                                 /*
 2426                                  * We've caught this ccb en route to
 2427                                  * the SIM.  Flag it for abort and the
 2428                                  * SIM will do so just before starting
 2429                                  * real work on the CCB.
 2430                                  */
 2431                                 abort_ccb->ccb_h.status =
 2432                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2433                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2434                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2435                                 break;
 2436                         }
 2437                 }
 2438                 if (XPT_FC_IS_QUEUED(abort_ccb)
 2439                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 2440                         /*
 2441                          * It's already completed but waiting
 2442                          * for our SWI to get to it.
 2443                          */
 2444                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 2445                         break;
 2446                 }
 2447                 /*
 2448                  * If we weren't able to take care of the abort request
 2449                  * in the XPT, pass the request down to the SIM for processing.
 2450                  */
 2451         }
 2452         /* FALLTHROUGH */
 2453         case XPT_ACCEPT_TARGET_IO:
 2454         case XPT_EN_LUN:
 2455         case XPT_IMMED_NOTIFY:
 2456         case XPT_NOTIFY_ACK:
 2457         case XPT_RESET_BUS:
 2458         case XPT_IMMEDIATE_NOTIFY:
 2459         case XPT_NOTIFY_ACKNOWLEDGE:
 2460         case XPT_GET_SIM_KNOB:
 2461         case XPT_SET_SIM_KNOB:
 2462         {
 2463                 struct cam_sim *sim;
 2464 
 2465                 sim = start_ccb->ccb_h.path->bus->sim;
 2466                 (*(sim->sim_action))(sim, start_ccb);
 2467                 break;
 2468         }
 2469         case XPT_PATH_INQ:
 2470         {
 2471                 struct cam_sim *sim;
 2472 
 2473                 sim = start_ccb->ccb_h.path->bus->sim;
 2474                 (*(sim->sim_action))(sim, start_ccb);
 2475                 break;
 2476         }
 2477         case XPT_PATH_STATS:
 2478                 start_ccb->cpis.last_reset =
 2479                         start_ccb->ccb_h.path->bus->last_reset;
 2480                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2481                 break;
 2482         case XPT_GDEV_TYPE:
 2483         {
 2484                 struct cam_ed *dev;
 2485 
 2486                 dev = start_ccb->ccb_h.path->device;
 2487                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2488                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2489                 } else {
 2490                         struct ccb_getdev *cgd;
 2491                         struct cam_eb *bus;
 2492                         struct cam_et *tar;
 2493 
 2494                         cgd = &start_ccb->cgd;
 2495                         bus = cgd->ccb_h.path->bus;
 2496                         tar = cgd->ccb_h.path->target;
 2497                         cgd->protocol = dev->protocol;
 2498                         cgd->inq_data = dev->inq_data;
 2499                         cgd->ident_data = dev->ident_data;
 2500                         cgd->inq_flags = dev->inq_flags;
 2501                         cgd->ccb_h.status = CAM_REQ_CMP;
 2502                         cgd->serial_num_len = dev->serial_num_len;
 2503                         if ((dev->serial_num_len > 0)
 2504                          && (dev->serial_num != NULL))
 2505                                 bcopy(dev->serial_num, cgd->serial_num,
 2506                                       dev->serial_num_len);
 2507                 }
 2508                 break;
 2509         }
 2510         case XPT_GDEV_STATS:
 2511         {
 2512                 struct cam_ed *dev;
 2513 
 2514                 dev = start_ccb->ccb_h.path->device;
 2515                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2516                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2517                 } else {
 2518                         struct ccb_getdevstats *cgds;
 2519                         struct cam_eb *bus;
 2520                         struct cam_et *tar;
 2521 
 2522                         cgds = &start_ccb->cgds;
 2523                         bus = cgds->ccb_h.path->bus;
 2524                         tar = cgds->ccb_h.path->target;
 2525                         cgds->dev_openings = dev->ccbq.dev_openings;
 2526                         cgds->dev_active = dev->ccbq.dev_active;
 2527                         cgds->devq_openings = dev->ccbq.devq_openings;
 2528                         cgds->devq_queued = dev->ccbq.queue.entries;
 2529                         cgds->held = dev->ccbq.held;
 2530                         cgds->last_reset = tar->last_reset;
 2531                         cgds->maxtags = dev->maxtags;
 2532                         cgds->mintags = dev->mintags;
 2533                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 2534                                 cgds->last_reset = bus->last_reset;
 2535                         cgds->ccb_h.status = CAM_REQ_CMP;
 2536                 }
 2537                 break;
 2538         }
 2539         case XPT_GDEVLIST:
 2540         {
 2541                 struct cam_periph       *nperiph;
 2542                 struct periph_list      *periph_head;
 2543                 struct ccb_getdevlist   *cgdl;
 2544                 u_int                   i;
 2545                 struct cam_ed           *device;
 2546                 int                     found;
 2547 
 2548 
 2549                 found = 0;
 2550 
 2551                 /*
 2552                  * Don't want anyone mucking with our data.
 2553                  */
 2554                 device = start_ccb->ccb_h.path->device;
 2555                 periph_head = &device->periphs;
 2556                 cgdl = &start_ccb->cgdl;
 2557 
 2558                 /*
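                      /*
                       * Caller's view (illustrative): each XPT_GDEVLIST ccb
                       * returns one peripheral.  The caller feeds the index
                       * and generation from the previous reply back in and
                       * loops until the status is CAM_GDEVLIST_LAST_DEVICE,
                       * restarting at index 0 on CAM_GDEVLIST_LIST_CHANGED.
                       */
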
 2559                  * Check and see if the list has changed since the user
 2560                  * last requested a list member.  If so, tell them that the
 2561                  * list has changed, and therefore they need to start over
 2562                  * from the beginning.
 2563                  */
 2564                 if ((cgdl->index != 0) &&
 2565                     (cgdl->generation != device->generation)) {
 2566                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 2567                         break;
 2568                 }
 2569 
 2570                 /*
 2571                  * Traverse the list of peripherals and attempt to find
 2572                  * the requested peripheral.
 2573                  */
 2574                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 2575                      (nperiph != NULL) && (i <= cgdl->index);
 2576                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 2577                         if (i == cgdl->index) {
 2578                                 strncpy(cgdl->periph_name,
 2579                                         nperiph->periph_name,
 2580                                         DEV_IDLEN);
 2581                                 cgdl->unit_number = nperiph->unit_number;
 2582                                 found = 1;
 2583                         }
 2584                 }
 2585                 if (found == 0) {
 2586                         cgdl->status = CAM_GDEVLIST_ERROR;
 2587                         break;
 2588                 }
 2589 
 2590                 if (nperiph == NULL)
 2591                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 2592                 else
 2593                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 2594 
 2595                 cgdl->index++;
 2596                 cgdl->generation = device->generation;
 2597 
 2598                 cgdl->ccb_h.status = CAM_REQ_CMP;
 2599                 break;
 2600         }
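        /*
         * A minimal sketch (in comment form only, not compiled here) of
         * how a caller holding the SIM lock might walk a device's
         * peripheral list with XPT_GDEVLIST; "path" and the printf
         * consumer are illustrative assumptions:
         *
         *      struct ccb_getdevlist cgdl;
         *
         *      bzero(&cgdl, sizeof(cgdl));
         *      do {
         *              xpt_setup_ccb(&cgdl.ccb_h, path, CAM_PRIORITY_NORMAL);
         *              cgdl.ccb_h.func_code = XPT_GDEVLIST;
         *              xpt_action((union ccb *)&cgdl);
         *              if (cgdl.status != CAM_GDEVLIST_MORE_DEVS &&
         *                  cgdl.status != CAM_GDEVLIST_LAST_DEVICE)
         *                      break;
         *              printf("%s%d\n", cgdl.periph_name, cgdl.unit_number);
         *      } while (cgdl.status == CAM_GDEVLIST_MORE_DEVS);
         */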
 2601         case XPT_DEV_MATCH:
 2602         {
 2603                 dev_pos_type position_type;
 2604                 struct ccb_dev_match *cdm;
 2605 
 2606                 cdm = &start_ccb->cdm;
 2607 
 2608                 /*
 2609                  * There are two ways of getting at information in the EDT.
 2610                  * The first way is via the primary EDT tree.  It starts
 2611                  * with a list of busses, then a list of targets on a bus,
 2612                  * then devices/luns on a target, and then peripherals on a
 2613                  * device/lun.  The other way is via the peripheral
 2614                  * driver lists, which are organized by peripheral
 2615                  * driver.  It makes sense to use the peripheral driver
 2616                  * lists when the user is looking for something like
 2617                  * "da1", or all "da" devices.  If the user is looking
 2618                  * for something on a particular bus, target, or lun,
 2619                  * it's generally better to go through the EDT tree.
 2620                  */
 2621 
 2622                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 2623                         position_type = cdm->pos.position_type;
 2624                 else {
 2625                         u_int i;
 2626 
 2627                         position_type = CAM_DEV_POS_NONE;
 2628 
 2629                         for (i = 0; i < cdm->num_patterns; i++) {
 2630                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 2631                                  || (cdm->patterns[i].type == DEV_MATCH_DEVICE)) {
 2632                                         position_type = CAM_DEV_POS_EDT;
 2633                                         break;
 2634                                 }
 2635                         }
 2636 
 2637                         if (cdm->num_patterns == 0)
 2638                                 position_type = CAM_DEV_POS_EDT;
 2639                         else if (position_type == CAM_DEV_POS_NONE)
 2640                                 position_type = CAM_DEV_POS_PDRV;
 2641                 }
 2642 
 2643                 switch (position_type & CAM_DEV_POS_TYPEMASK) {
 2644                 case CAM_DEV_POS_EDT:
 2645                         xptedtmatch(cdm);
 2646                         break;
 2647                 case CAM_DEV_POS_PDRV:
 2648                         xptperiphlistmatch(cdm);
 2649                         break;
 2650                 default:
 2651                         cdm->status = CAM_DEV_MATCH_ERROR;
 2652                         break;
 2653                 }
 2654 
 2655                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 2656                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2657                 else
 2658                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2659 
 2660                 break;
 2661         }
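        /*
         * A hedged sketch of a one-pattern XPT_DEV_MATCH request, roughly
         * as a userland tool would build one over the xpt(4) pass-through
         * device; the buffer sizes and the "matches" array are
         * illustrative assumptions:
         *
         *      struct ccb_dev_match cdm;
         *      struct dev_match_pattern pattern;
         *      struct dev_match_result matches[64];
         *
         *      bzero(&cdm, sizeof(cdm));
         *      bzero(&pattern, sizeof(pattern));
         *      pattern.type = DEV_MATCH_PERIPH;
         *      pattern.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
         *      strlcpy(pattern.pattern.periph_pattern.periph_name, "da",
         *          sizeof(pattern.pattern.periph_pattern.periph_name));
         *      cdm.num_patterns = 1;
         *      cdm.pattern_buf_len = sizeof(pattern);
         *      cdm.patterns = &pattern;
         *      cdm.match_buf_len = sizeof(matches);
         *      cdm.matches = matches;
         *
         * Since the only pattern is a peripheral pattern, the logic above
         * routes such a request to xptperiphlistmatch().
         */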
 2662         case XPT_SASYNC_CB:
 2663         {
 2664                 struct ccb_setasync *csa;
 2665                 struct async_node *cur_entry;
 2666                 struct async_list *async_head;
 2667                 u_int32_t added;
 2668 
 2669                 csa = &start_ccb->csa;
 2670                 added = csa->event_enable;
 2671                 async_head = &csa->ccb_h.path->device->asyncs;
 2672 
 2673                 /*
 2674                  * If there is already an entry for us, simply
 2675                  * update it.
 2676                  */
 2677                 cur_entry = SLIST_FIRST(async_head);
 2678                 while (cur_entry != NULL) {
 2679                         if ((cur_entry->callback_arg == csa->callback_arg)
 2680                          && (cur_entry->callback == csa->callback))
 2681                                 break;
 2682                         cur_entry = SLIST_NEXT(cur_entry, links);
 2683                 }
 2684 
 2685                 if (cur_entry != NULL) {
 2686                         /*
 2687                          * If the request has no flags set, remove the
 2688                          * entry; otherwise update it.  Either way, hand
 2689                          * back in csa->event_enable only those events
                         * that were newly enabled.
                         */
 2690                         added &= ~cur_entry->event_enable;
 2691                         if (csa->event_enable == 0) {
 2692                                 SLIST_REMOVE(async_head, cur_entry,
 2693                                              async_node, links);
 2694                                 xpt_release_device(csa->ccb_h.path->device);
 2695                                 free(cur_entry, M_CAMXPT);
 2696                         } else {
 2697                                 cur_entry->event_enable = csa->event_enable;
 2698                         }
 2699                         csa->event_enable = added;
 2700                 } else {
 2701                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 2702                                            M_NOWAIT);
 2703                         if (cur_entry == NULL) {
 2704                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 2705                                 break;
 2706                         }
 2707                         cur_entry->event_enable = csa->event_enable;
 2708                         cur_entry->callback_arg = csa->callback_arg;
 2709                         cur_entry->callback = csa->callback;
 2710                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 2711                         xpt_acquire_device(csa->ccb_h.path->device);
 2712                 }
 2713                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2714                 break;
 2715         }
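        /*
         * A minimal sketch, assuming a valid "path" and the SIM lock
         * held, of the usual idiom for registering an async callback
         * (cf. xpt_register_async()); "mycallback" and "mysoftc" are
         * assumed names:
         *
         *      struct ccb_setasync csa;
         *
         *      xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
         *      csa.ccb_h.func_code = XPT_SASYNC_CB;
         *      csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
         *      csa.callback = mycallback;
         *      csa.callback_arg = mysoftc;
         *      xpt_action((union ccb *)&csa);
         *
         * Passing event_enable == 0 for an existing callback/callback_arg
         * pair removes the registration, as implemented above.
         */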
 2716         case XPT_REL_SIMQ:
 2717         {
 2718                 struct ccb_relsim *crs;
 2719                 struct cam_ed *dev;
 2720 
 2721                 crs = &start_ccb->crs;
 2722                 dev = crs->ccb_h.path->device;
 2723                 if (dev == NULL) {
 2725                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 2726                         break;
 2727                 }
 2728 
 2729                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 2731                         if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
 2732                                 /* Don't ever go below one opening */
 2733                                 if (crs->openings > 0) {
 2734                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
 2735                                                             crs->openings);
 2736 
 2737                                         if (bootverbose) {
 2738                                                 xpt_print(crs->ccb_h.path,
 2739                                                     "tagged openings now %d\n",
 2740                                                     crs->openings);
 2741                                         }
 2742                                 }
 2743                         }
 2744                 }
 2745 
 2746                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 2748                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 2750                                 /*
 2751                                  * Just extend the old timeout and decrement
 2752                                  * the freeze count so that a single timeout
 2753                                  * is sufficient for releasing the queue.
 2754                                  */
 2755                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2756                                 callout_stop(&dev->callout);
 2757                         } else {
 2759                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2760                         }
 2761 
 2762                         callout_reset(&dev->callout,
 2763                             (crs->release_timeout * hz) / 1000,
 2764                             xpt_release_devq_timeout, dev);
 2765 
 2766                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 2768                 }
 2769 
 2770                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 2772                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 2773                                 /*
 2774                                  * Decrement the freeze count so that a single
 2775                                  * completion is still sufficient to unfreeze
 2776                                  * the queue.
 2777                                  */
 2778                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2779                         } else {
 2781                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 2782                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2783                         }
 2784                 }
 2785 
 2786                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 2788                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 2789                          || (dev->ccbq.dev_active == 0)) {
 2791                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2792                         } else {
 2794                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 2795                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2796                         }
 2797                 }
 2798 
 2799                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 2800                         xpt_release_devq_rl(crs->ccb_h.path, /*runlevel*/
 2801                             (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
 2802                                 crs->release_timeout : 0,
 2803                             /*count*/1, /*run_queue*/TRUE);
 2804                 }
 2805                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt[0];
 2806                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2807                 break;
 2808         }
 2809         case XPT_DEBUG: {
 2810 #ifdef CAMDEBUG
 2811 #ifdef CAM_DEBUG_DELAY
 2812                 cam_debug_delay = CAM_DEBUG_DELAY;
 2813 #endif
 2814                 cam_dflags = start_ccb->cdbg.flags;
 2815                 if (cam_dpath != NULL) {
 2816                         xpt_free_path(cam_dpath);
 2817                         cam_dpath = NULL;
 2818                 }
 2819 
 2820                 if (cam_dflags != CAM_DEBUG_NONE) {
 2821                         if (xpt_create_path(&cam_dpath, xpt_periph,
 2822                                             start_ccb->ccb_h.path_id,
 2823                                             start_ccb->ccb_h.target_id,
 2824                                             start_ccb->ccb_h.target_lun) !=
 2825                                             CAM_REQ_CMP) {
 2826                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2827                                 cam_dflags = CAM_DEBUG_NONE;
 2828                         } else {
 2829                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2830                                 xpt_print(cam_dpath, "debugging flags now %x\n",
 2831                                     cam_dflags);
 2832                         }
 2833                 } else {
 2834                         cam_dpath = NULL;
 2835                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2836                 }
 2837 #else /* !CAMDEBUG */
 2838                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 2839 #endif /* CAMDEBUG */
 2840                 break;
 2841         }
 2842         case XPT_FREEZE_QUEUE:
 2843         {
 2844                 struct ccb_relsim *crs = &start_ccb->crs;
 2845 
 2846                 xpt_freeze_devq_rl(crs->ccb_h.path, /*runlevel*/
 2847                     (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
 2848                     crs->release_timeout : 0, /*count*/1);
 2849                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2850                 break;
 2851         }
 2852         case XPT_NOOP:
 2853                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 2854                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
 2855                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2856                 break;
 2857         default:
 2858         case XPT_SDEV_TYPE:
 2859         case XPT_TERM_IO:
 2860         case XPT_ENG_INQ:
 2861                 /* XXX Implement */
 2862                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 2863                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
 2864                         xpt_done(start_ccb);
 2865                 }
 2866                 break;
 2867         }
 2868 }
 2869 
 2870 void
 2871 xpt_polled_action(union ccb *start_ccb)
 2872 {
 2873         u_int32_t timeout;
 2874         struct    cam_sim *sim;
 2875         struct    cam_devq *devq;
 2876         struct    cam_ed *dev;
 2878 
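        /*
         * The CCB timeout is expressed in milliseconds; scale it to the
         * number of 100us polling intervals paired with DELAY(100) below.
         */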
 2879         timeout = start_ccb->ccb_h.timeout * 10;
 2880         sim = start_ccb->ccb_h.path->bus->sim;
 2881         devq = sim->devq;
 2882         dev = start_ccb->ccb_h.path->device;
 2883 
 2884         mtx_assert(sim->mtx, MA_OWNED);
 2885 
 2886         /*
 2887          * Steal an opening so that no other queued requests
 2888          * can get it before us while we simulate interrupts.
 2889          */
 2890         dev->ccbq.devq_openings--;
 2891         dev->ccbq.dev_openings--;
 2892 
 2893         while (((devq != NULL && devq->send_openings <= 0) ||
 2894            dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
 2895                 DELAY(100);
 2896                 (*(sim->sim_poll))(sim);
 2897                 camisr_runqueue(&sim->sim_doneq);
 2898         }
 2899 
 2900         dev->ccbq.devq_openings++;
 2901         dev->ccbq.dev_openings++;
 2902 
 2903         if (timeout != 0) {
 2904                 xpt_action(start_ccb);
 2905                 while (--timeout > 0) {
 2906                         (*(sim->sim_poll))(sim);
 2907                         camisr_runqueue(&sim->sim_doneq);
 2908                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 2909                             != CAM_REQ_INPROG)
 2910                                 break;
 2911                         DELAY(100);
 2912                 }
 2913                 if (timeout == 0) {
 2914                         /*
 2915                          * XXX Is it worth adding a sim_timeout entry
 2916                          * point so we can attempt recovery?  If
 2917                          * this is only used for dumps, I don't think
 2918                          * it is.
 2919                          */
 2920                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 2921                 }
 2922         } else {
 2923                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2924         }
 2925 }
 2926 
 2927 /*
 2928  * Schedule a peripheral driver to receive a ccb when its
 2929  * target device has space for more transactions.
 2930  */
 2931 void
 2932 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 2933 {
 2934         struct cam_ed *device;
 2935         int runq = 0;
 2936 
 2937         mtx_assert(perph->sim->mtx, MA_OWNED);
 2938 
 2939         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 2940         device = perph->path->device;
 2941         if (periph_is_queued(perph)) {
 2942                 /* Simply reorder based on new priority */
 2943                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 2944                           ("   change priority to %d\n", new_priority));
 2945                 if (new_priority < perph->pinfo.priority) {
 2946                         camq_change_priority(&device->drvq,
 2947                                              perph->pinfo.index,
 2948                                              new_priority);
 2949                         runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 2950                 }
 2951         } else {
 2952                 /* New entry on the queue */
 2953                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 2954                           ("   added periph to queue\n"));
 2955                 perph->pinfo.priority = new_priority;
 2956                 perph->pinfo.generation = ++device->drvq.generation;
 2957                 camq_insert(&device->drvq, &perph->pinfo);
 2958                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 2959         }
 2960         if (runq != 0) {
 2961                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 2962                           ("   calling xpt_run_devq\n"));
 2963                 xpt_run_dev_allocq(perph->path->bus);
 2964         }
 2965 }
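
/*
 * A hedged sketch of the peripheral-driver idiom built on xpt_schedule():
 * the periph asks to be scheduled, and the transport later hands it a CCB
 * through its registered periph_start routine.  "mystart" and the CCB
 * fill-in are illustrative assumptions:
 *
 *      xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 *
 *      static void
 *      mystart(struct cam_periph *periph, union ccb *start_ccb)
 *      {
 *              ... fill in start_ccb for the transaction at hand ...
 *              xpt_action(start_ccb);
 *      }
 */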
 2966 
 2968 /*
 2969  * Schedule a device to run on a given queue.
 2970  * If the device was inserted as a new entry on the queue, or an
 2971  * existing entry had its priority raised, return 1 meaning the
 2972  * device queue should be run.  If we were already queued at an
 2973  * equal or higher priority, someone else has already started
 2974  * the queue, so return 0 and the caller won't re-run it.
 2975  */
 2976 int
 2977 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 2978                  u_int32_t new_priority)
 2979 {
 2980         int retval;
 2981         u_int32_t old_priority;
 2982 
 2983         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 2984 
 2985         old_priority = pinfo->priority;
 2986 
 2987         /*
 2988          * Are we already queued?
 2989          */
 2990         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 2991                 /* Simply reorder based on new priority */
 2992                 if (new_priority < old_priority) {
 2993                         camq_change_priority(queue, pinfo->index,
 2994                                              new_priority);
 2995                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 2996                                         ("changed priority to %d\n",
 2997                                          new_priority));
 2998                         retval = 1;
 2999                 } else
 3000                         retval = 0;
 3001         } else {
 3002                 /* New entry on the queue */
 3003                 if (new_priority < old_priority)
 3004                         pinfo->priority = new_priority;
 3005 
 3006                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3007                                 ("Inserting onto queue\n"));
 3008                 pinfo->generation = ++queue->generation;
 3009                 camq_insert(queue, pinfo);
 3010                 retval = 1;
 3011         }
 3012         return (retval);
 3013 }
 3014 
 3015 static void
 3016 xpt_run_dev_allocq(struct cam_eb *bus)
 3017 {
 3018         struct  cam_devq *devq;
 3019 
 3020         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3021         devq = bus->sim->devq;
 3022 
 3023         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3024                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3025                          "openings == %d, active == %d\n",
 3026                          devq->alloc_queue.qfrozen_cnt[0],
 3027                          devq->alloc_queue.entries,
 3028                          devq->alloc_openings,
 3029                          devq->alloc_active));
 3030 
 3031         devq->alloc_queue.qfrozen_cnt[0]++;
 3032         while ((devq->alloc_queue.entries > 0)
 3033             && (devq->alloc_openings > 0)
 3034             && (devq->alloc_queue.qfrozen_cnt[0] <= 1)) {
 3035                 struct  cam_ed_qinfo *qinfo;
 3036                 struct  cam_ed *device;
 3037                 union   ccb *work_ccb;
 3038                 struct  cam_periph *drv;
 3039                 struct  camq *drvq;
 3040 
 3041                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3042                                                            CAMQ_HEAD);
 3043                 device = qinfo->device;
 3044                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3045                                 ("running device %p\n", device));
 3046 
 3047                 drvq = &device->drvq;
 3048 
 3049 #ifdef CAMDEBUG
 3050                 if (drvq->entries <= 0) {
 3051                         panic("xpt_run_dev_allocq: "
 3052                               "Device on queue without any work to do");
 3053                 }
 3054 #endif
 3055                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3056                         devq->alloc_openings--;
 3057                         devq->alloc_active++;
 3058                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3059                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3060                                       drv->pinfo.priority);
 3061                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3062                                         ("calling periph start\n"));
 3063                         drv->periph_start(drv, work_ccb);
 3064                 } else {
 3065                         /*
 3066                          * Malloc failure in alloc_ccb.
 3067                          *
 3068                          * XXX add us to a list to be run from free_ccb
 3069                          * if we don't have any ccbs active on this
 3070                          * device queue, otherwise we may never get run
 3071                          * again.
 3072                          */
 3074                         break;
 3075                 }
 3076 
 3077                 /* We may have more work. Attempt to reschedule. */
 3078                 xpt_schedule_dev_allocq(bus, device);
 3079         }
 3080         devq->alloc_queue.qfrozen_cnt[0]--;
 3081 }
 3082 
 3083 static void
 3084 xpt_run_dev_sendq(struct cam_eb *bus)
 3085 {
 3086         struct  cam_devq *devq;
 3087 
 3088         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3089 
 3090         devq = bus->sim->devq;
 3091 
 3092         devq->send_queue.qfrozen_cnt[0]++;
 3093         while ((devq->send_queue.entries > 0)
 3094             && (devq->send_openings > 0)
 3095             && (devq->send_queue.qfrozen_cnt[0] <= 1)) {
 3096                 struct  cam_ed_qinfo *qinfo;
 3097                 struct  cam_ed *device;
 3098                 union ccb *work_ccb;
 3099                 struct  cam_sim *sim;
 3100 
 3101                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3102                                                            CAMQ_HEAD);
 3103                 device = qinfo->device;
 3104                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3105                                 ("running device %p\n", device));
 3106 
 3107                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3108                 if (work_ccb == NULL) {
 3109                         printf("device on run queue with no ccbs???\n");
 3110                         continue;
 3111                 }
 3112 
 3113                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3114 
 3115                         mtx_lock(&xsoftc.xpt_lock);
 3116                         if (xsoftc.num_highpower <= 0) {
 3117                                 /*
 3118                                  * We got a high power command, but we
 3119                                  * don't have any available slots.  Freeze
 3120                                  * the device queue until we have a slot
 3121                                  * available.
 3122                                  */
 3123                                 xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3124                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
 3125                                                    &work_ccb->ccb_h,
 3126                                                    xpt_links.stqe);
 3127 
 3128                                 mtx_unlock(&xsoftc.xpt_lock);
 3129                                 continue;
 3130                         } else {
 3131                                 /*
 3132                                  * Consume a high power slot while
 3133                                  * this ccb runs.
 3134                                  */
 3135                                 xsoftc.num_highpower--;
 3136                         }
 3137                         mtx_unlock(&xsoftc.xpt_lock);
 3138                 }
 3139                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3140                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3141 
 3142                 devq->send_openings--;
 3143                 devq->send_active++;
 3144 
 3145                 xpt_schedule_dev_sendq(bus, device);
 3146 
 3147                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
 3148                         /*
 3149                          * The client wants to freeze the queue
 3150                          * after this CCB is sent.
 3151                          */
 3152                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3153                 }
 3154 
 3155                 /* In Target mode, the peripheral driver knows best... */
 3156                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3157                         if ((device->inq_flags & SID_CmdQue) != 0
 3158                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3159                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3160                         else
 3161                                 /*
 3162                                  * Clear this in case of a retried CCB that
 3163                                  * failed due to a rejected tag.
 3164                                  */
 3165                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3166                 }
 3167 
 3168                 /*
 3169                  * Device queues can be shared among multiple sim instances
 3170                  * that reside on different busses.  Use the SIM in the queue
 3171                  * CCB's path, rather than the one in the bus that was passed
 3172                  * into this function.
 3173                  */
 3174                 sim = work_ccb->ccb_h.path->bus->sim;
 3175                 (*(sim->sim_action))(sim, work_ccb);
 3176         }
 3177         devq->send_queue.qfrozen_cnt[0]--;
 3178 }
 3179 
 3180 /*
 3181  * This function merges the settable fields and payload from the slave
 3182  * ccb into the master ccb, while keeping the master's path, priority,
       * and queue linkage intact.
 3183  */
 3184 void
 3185 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3186 {
 3187 
 3188         /*
 3189          * Pull fields that are valid for peripheral drivers to set
 3190          * into the master CCB along with the CCB "payload".
 3191          */
 3192         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3193         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3194         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3195         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
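        /*
         * &(&ccb->ccb_h)[1] addresses the byte just past the header,
         * i.e. the start of the function-code specific payload, so this
         * copies everything in the CCB that follows the header.
         */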
 3196         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3197               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3198 }
 3199 
 3200 void
 3201 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3202 {
 3203 
 3204         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3205         ccb_h->pinfo.priority = priority;
 3206         ccb_h->path = path;
 3207         ccb_h->path_id = path->bus->path_id;
 3208         if (path->target)
 3209                 ccb_h->target_id = path->target->target_id;
 3210         else
 3211                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3212         if (path->device) {
 3213                 ccb_h->target_lun = path->device->lun_id;
 3214                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3215         } else {
 3216                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3217         }
 3218         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3219         ccb_h->flags = 0;
 3220 }
 3221 
 3222 /* Path manipulation functions */
 3223 cam_status
 3224 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3225                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3226 {
 3227         struct     cam_path *path;
 3228         cam_status status;
 3229 
 3230         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 3231 
 3232         if (path == NULL) {
 3233                 status = CAM_RESRC_UNAVAIL;
 3234                 return(status);
 3235         }
 3236         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3237         if (status != CAM_REQ_CMP) {
 3238                 free(path, M_CAMXPT);
 3239                 path = NULL;
 3240         }
 3241         *new_path_ptr = path;
 3242         return (status);
 3243 }
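
/*
 * A minimal usage sketch, assuming the caller holds the SIM lock and
 * "bus_path_id" names an existing bus; the wildcard IDs yield a path
 * that refers to every target and lun on that bus:
 *
 *      struct cam_path *path;
 *
 *      if (xpt_create_path(&path, NULL, bus_path_id,
 *          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
 *              return (ENOMEM);
 *      ... use the path, e.g. with xpt_async() ...
 *      xpt_free_path(path);
 */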
 3244 
 3245 cam_status
 3246 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
 3247                          struct cam_periph *periph, path_id_t path_id,
 3248                          target_id_t target_id, lun_id_t lun_id)
 3249 {
 3250         struct     cam_path *path;
 3251         struct     cam_eb *bus = NULL;
 3252         cam_status status;
 3253         int        need_unlock = 0;
 3254 
 3255         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK);
 3256 
 3257         if (path_id != CAM_BUS_WILDCARD) {
 3258                 bus = xpt_find_bus(path_id);
 3259                 if (bus != NULL) {
 3260                         need_unlock = 1;
 3261                         CAM_SIM_LOCK(bus->sim);
 3262                 }
 3263         }
 3264         status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
 3265         if (need_unlock)
 3266                 CAM_SIM_UNLOCK(bus->sim);
 3267         if (status != CAM_REQ_CMP) {
 3268                 free(path, M_CAMXPT);
 3269                 path = NULL;
 3270         }
 3271         *new_path_ptr = path;
 3272         return (status);
 3273 }
 3274 
 3275 cam_status
 3276 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 3277                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3278 {
 3279         struct       cam_eb *bus;
 3280         struct       cam_et *target;
 3281         struct       cam_ed *device;
 3282         cam_status   status;
 3283 
 3284         status = CAM_REQ_CMP;   /* Completed without error */
 3285         target = NULL;          /* Wildcarded */
 3286         device = NULL;          /* Wildcarded */
 3287 
 3288         /*
 3289          * We will potentially modify the EDT, so hold off any other
 3290          * context that may attempt to create cam paths concurrently.
 3291          */
 3292         bus = xpt_find_bus(path_id);
 3293         if (bus == NULL) {
 3294                 status = CAM_PATH_INVALID;
 3295         } else {
 3296                 target = xpt_find_target(bus, target_id);
 3297                 if (target == NULL) {
 3298                         /* Create one */
 3299                         struct cam_et *new_target;
 3300 
 3301                         new_target = xpt_alloc_target(bus, target_id);
 3302                         if (new_target == NULL) {
 3303                                 status = CAM_RESRC_UNAVAIL;
 3304                         } else {
 3305                                 target = new_target;
 3306                         }
 3307                 }
 3308                 if (target != NULL) {
 3309                         device = xpt_find_device(target, lun_id);
 3310                         if (device == NULL) {
 3311                                 /* Create one */
 3312                                 struct cam_ed *new_device;
 3313 
 3314                                 new_device =
 3315                                     (*(bus->xport->alloc_device))(bus,
 3316                                                                       target,
 3317                                                                       lun_id);
 3318                                 if (new_device == NULL) {
 3319                                         status = CAM_RESRC_UNAVAIL;
 3320                                 } else {
 3321                                         device = new_device;
 3322                                 }
 3323                         }
 3324                 }
 3325         }
 3326 
 3327         /*
 3328          * Only touch the user's data if we are successful.
 3329          */
 3330         if (status == CAM_REQ_CMP) {
 3331                 new_path->periph = perph;
 3332                 new_path->bus = bus;
 3333                 new_path->target = target;
 3334                 new_path->device = device;
 3335                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 3336         } else {
 3337                 if (device != NULL)
 3338                         xpt_release_device(device);
 3339                 if (target != NULL)
 3340                         xpt_release_target(target);
 3341                 if (bus != NULL)
 3342                         xpt_release_bus(bus);
 3343         }
 3344         return (status);
 3345 }
 3346 
 3347 void
 3348 xpt_release_path(struct cam_path *path)
 3349 {
 3350         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 3351         if (path->device != NULL) {
 3352                 xpt_release_device(path->device);
 3353                 path->device = NULL;
 3354         }
 3355         if (path->target != NULL) {
 3356                 xpt_release_target(path->target);
 3357                 path->target = NULL;
 3358         }
 3359         if (path->bus != NULL) {
 3360                 xpt_release_bus(path->bus);
 3361                 path->bus = NULL;
 3362         }
 3363 }
 3364 
 3365 void
 3366 xpt_free_path(struct cam_path *path)
 3367 {
 3368 
 3369         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 3370         xpt_release_path(path);
 3371         free(path, M_CAMXPT);
 3372 }
 3373 
 3375 /*
 3376  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 3377  * in path1, 2 for match with wildcards in path2.
 3378  */
 3379 int
 3380 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 3381 {
 3382         int retval = 0;
 3383 
 3384         if (path1->bus != path2->bus) {
 3385                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 3386                         retval = 1;
 3387                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 3388                         retval = 2;
 3389                 else
 3390                         return (-1);
 3391         }
 3392         if (path1->target != path2->target) {
 3393                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 3394                         if (retval == 0)
 3395                                 retval = 1;
 3396                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 3397                         retval = 2;
 3398                 else
 3399                         return (-1);
 3400         }
 3401         if (path1->device != path2->device) {
 3402                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 3403                         if (retval == 0)
 3404                                 retval = 1;
 3405                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 3406                         retval = 2;
 3407                 else
 3408                         return (-1);
 3409         }
 3410         return (retval);
 3411 }
 3412 
 3413 void
 3414 xpt_print_path(struct cam_path *path)
 3415 {
 3416 
 3417         if (path == NULL)
 3418                 printf("(nopath): ");
 3419         else {
 3420                 if (path->periph != NULL)
 3421                         printf("(%s%d:", path->periph->periph_name,
 3422                                path->periph->unit_number);
 3423                 else
 3424                         printf("(noperiph:");
 3425 
 3426                 if (path->bus != NULL)
 3427                         printf("%s%d:%d:", path->bus->sim->sim_name,
 3428                                path->bus->sim->unit_number,
 3429                                path->bus->sim->bus_id);
 3430                 else
 3431                         printf("nobus:");
 3432 
 3433                 if (path->target != NULL)
 3434                         printf("%d:", path->target->target_id);
 3435                 else
 3436                         printf("X:");
 3437 
 3438                 if (path->device != NULL)
 3439                         printf("%d): ", path->device->lun_id);
 3440                 else
 3441                         printf("X): ");
 3442         }
 3443 }
 3444 
 3445 void
 3446 xpt_print(struct cam_path *path, const char *fmt, ...)
 3447 {
 3448         va_list ap;
 3449         xpt_print_path(path);
 3450         va_start(ap, fmt);
 3451         vprintf(fmt, ap);
 3452         va_end(ap);
 3453 }
 3454 
 3455 int
 3456 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 3457 {
 3458         struct sbuf sb;
 3459 
 3460 #ifdef INVARIANTS
 3461         if (path != NULL && path->bus != NULL)
 3462                 mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3463 #endif
 3464 
 3465         sbuf_new(&sb, str, str_len, 0);
 3466 
 3467         if (path == NULL)
 3468                 sbuf_printf(&sb, "(nopath): ");
 3469         else {
 3470                 if (path->periph != NULL)
 3471                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 3472                                     path->periph->unit_number);
 3473                 else
 3474                         sbuf_printf(&sb, "(noperiph:");
 3475 
 3476                 if (path->bus != NULL)
 3477                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 3478                                     path->bus->sim->unit_number,
 3479                                     path->bus->sim->bus_id);
 3480                 else
 3481                         sbuf_printf(&sb, "nobus:");
 3482 
 3483                 if (path->target != NULL)
 3484                         sbuf_printf(&sb, "%d:", path->target->target_id);
 3485                 else
 3486                         sbuf_printf(&sb, "X:");
 3487 
 3488                 if (path->device != NULL)
 3489                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
 3490                 else
 3491                         sbuf_printf(&sb, "X): ");
 3492         }
 3493         sbuf_finish(&sb);
 3494 
 3495         return(sbuf_len(&sb));
 3496 }
 3497 
 3498 path_id_t
 3499 xpt_path_path_id(struct cam_path *path)
 3500 {
 3501         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3502 
 3503         return(path->bus->path_id);
 3504 }
 3505 
 3506 target_id_t
 3507 xpt_path_target_id(struct cam_path *path)
 3508 {
 3509         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3510 
 3511         if (path->target != NULL)
 3512                 return (path->target->target_id);
 3513         else
 3514                 return (CAM_TARGET_WILDCARD);
 3515 }
 3516 
 3517 lun_id_t
 3518 xpt_path_lun_id(struct cam_path *path)
 3519 {
 3520         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3521 
 3522         if (path->device != NULL)
 3523                 return (path->device->lun_id);
 3524         else
 3525                 return (CAM_LUN_WILDCARD);
 3526 }
 3527 
 3528 struct cam_sim *
 3529 xpt_path_sim(struct cam_path *path)
 3530 {
 3531 
 3532         return (path->bus->sim);
 3533 }
 3534 
 3535 struct cam_periph*
 3536 xpt_path_periph(struct cam_path *path)
 3537 {
 3538         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3539 
 3540         return (path->periph);
 3541 }
 3542 
 3543 /*
 3544  * Release a CAM control block for the caller.  Remit the cost of the structure
 3545  * to the device referenced by the path.  If this device had no 'credits'
 3546  * and peripheral drivers have registered async callbacks for this notification,
 3547  * call them now.
 3548  */
 3549 void
 3550 xpt_release_ccb(union ccb *free_ccb)
 3551 {
 3552         struct   cam_path *path;
 3553         struct   cam_ed *device;
 3554         struct   cam_eb *bus;
 3555         struct   cam_sim *sim;
 3556 
 3557         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 3558         path = free_ccb->ccb_h.path;
 3559         device = path->device;
 3560         bus = path->bus;
 3561         sim = bus->sim;
 3562 
 3563         mtx_assert(sim->mtx, MA_OWNED);
 3564 
 3565         cam_ccbq_release_opening(&device->ccbq);
 3566         if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) {
 3567                 device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
 3568                 cam_ccbq_resize(&device->ccbq,
 3569                     device->ccbq.dev_openings + device->ccbq.dev_active);
 3570         }
 3571         if (sim->ccb_count > sim->max_ccbs) {
 3572                 xpt_free_ccb(free_ccb);
 3573                 sim->ccb_count--;
 3574         } else {
 3575                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
 3576                     xpt_links.sle);
 3577         }
 3578         if (sim->devq == NULL) {
 3579                 return;
 3580         }
 3581         sim->devq->alloc_openings++;
 3582         sim->devq->alloc_active--;
 3583         if (device_is_alloc_queued(device) == 0)
 3584                 xpt_schedule_dev_allocq(bus, device);
 3585         xpt_run_dev_allocq(bus);
 3586 }
 3587 
 3588 /* Functions accessed by SIM drivers */
 3589 
 3590 static struct xpt_xport xport_default = {
 3591         .alloc_device = xpt_alloc_device_default,
 3592         .action = xpt_action_default,
 3593         .async = xpt_dev_async_default,
 3594 };
 3595 
 3596 /*
 3597  * A sim structure, listing the SIM entry points and instance
 3598  * identification info, is passed to xpt_bus_register to hook the SIM
 3599  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 3600  * for this new bus, places it in the list of busses, and assigns
 3601  * it a path_id.  The path_id may be influenced by "hard wiring"
 3602  * information specified by the user.  Once interrupt services are
 3603  * available, the bus will be probed.
 3604  */
 3605 int32_t
 3606 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
 3607 {
 3608         struct cam_eb *new_bus;
 3609         struct cam_eb *old_bus;
 3610         struct ccb_pathinq cpi;
 3611         struct cam_path *path;
 3612         cam_status status;
 3613 
 3614         mtx_assert(sim->mtx, MA_OWNED);
 3615 
 3616         sim->bus_id = bus;
 3617         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 3618                                           M_CAMXPT, M_NOWAIT);
 3619         if (new_bus == NULL) {
 3620                 /* Couldn't satisfy request */
 3621                 return (CAM_RESRC_UNAVAIL);
 3622         }
 3623         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 3624         if (path == NULL) {
 3625                 free(new_bus, M_CAMXPT);
 3626                 return (CAM_RESRC_UNAVAIL);
 3627         }
 3628 
 3629         if (strcmp(sim->sim_name, "xpt") != 0) {
 3630                 sim->path_id =
 3631                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 3632         }
 3633 
 3634         TAILQ_INIT(&new_bus->et_entries);
 3635         new_bus->path_id = sim->path_id;
 3636         cam_sim_hold(sim);
 3637         new_bus->sim = sim;
 3638         timevalclear(&new_bus->last_reset);
 3639         new_bus->flags = 0;
 3640         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 3641         new_bus->generation = 0;
 3642 
 3643         mtx_lock(&xsoftc.xpt_topo_lock);
 3644         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3645         while (old_bus != NULL
 3646             && old_bus->path_id < new_bus->path_id)
 3647                 old_bus = TAILQ_NEXT(old_bus, links);
 3648         if (old_bus != NULL)
 3649                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 3650         else
 3651                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
 3652         xsoftc.bus_generation++;
 3653         mtx_unlock(&xsoftc.xpt_topo_lock);
 3654 
 3655         /*
 3656          * Set a default transport so that a PATH_INQ can be issued to
 3657          * the SIM.  This will then allow for probing and attaching of
 3658          * a more appropriate transport.
 3659          */
 3660         new_bus->xport = &xport_default;
 3661 
 3662         status = xpt_compile_path(path, /*periph*/NULL, sim->path_id,
 3663                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3664         if (status != CAM_REQ_CMP)
 3665                 printf("xpt_compile_path returned %d\n", status);
 3666 
 3667         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 3668         cpi.ccb_h.func_code = XPT_PATH_INQ;
 3669         xpt_action((union ccb *)&cpi);
 3670 
 3671         if (cpi.ccb_h.status == CAM_REQ_CMP) {
 3672                 switch (cpi.transport) {
 3673                 case XPORT_SPI:
 3674                 case XPORT_SAS:
 3675                 case XPORT_FC:
 3676                 case XPORT_USB:
 3677                 case XPORT_ISCSI:
 3678                 case XPORT_PPB:
 3679                         new_bus->xport = scsi_get_xport();
 3680                         break;
 3681                 case XPORT_ATA:
 3682                 case XPORT_SATA:
 3683                         new_bus->xport = ata_get_xport();
 3684                         break;
 3685                 default:
 3686                         new_bus->xport = &xport_default;
 3687                         break;
 3688                 }
 3689         }
 3690 
 3691         /* Notify interested parties */
 3692         if (sim->path_id != CAM_XPT_PATH_ID) {
 3693                 union   ccb *scan_ccb;
 3694 
 3695                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
 3696                 /* Initiate bus rescan. */
                scan_ccb = xpt_alloc_ccb_nowait();
                if (scan_ccb != NULL) {
                        scan_ccb->ccb_h.path = path;
                        scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
                        scan_ccb->crcn.flags = 0;
                        xpt_rescan(scan_ccb);
                } else {
                        xpt_print(path, "Can't allocate CCB to scan bus\n");
                        xpt_free_path(path);
                }
 3702         } else
 3703                 xpt_free_path(path);
 3704         return (CAM_SUCCESS);
 3705 }
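
/*
 * A hedged sketch of how a SIM driver typically reaches this point from
 * its attach routine; "mydrv", its action/poll handlers, softc, and
 * mutex are assumptions, and error handling is elided:
 *
 *      devq = cam_simq_alloc(MAX_TRANSACTIONS);
 *      sim = cam_sim_alloc(mydrv_action, mydrv_poll, "mydrv", softc,
 *          device_get_unit(dev), &softc->mtx, 1, MAX_TAGGED, devq);
 *      mtx_lock(&softc->mtx);
 *      if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS)
 *              ... tear down sim and devq ...
 *      mtx_unlock(&softc->mtx);
 */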
 3706 
 3707 int32_t
 3708 xpt_bus_deregister(path_id_t pathid)
 3709 {
 3710         struct cam_path bus_path;
 3711         cam_status status;
 3712 
 3713         status = xpt_compile_path(&bus_path, NULL, pathid,
 3714                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3715         if (status != CAM_REQ_CMP)
 3716                 return (status);
 3717 
 3718         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 3719         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 3720 
 3721         /* Release the reference count held while registered. */
 3722         xpt_release_bus(bus_path.bus);
 3723         xpt_release_path(&bus_path);
 3724 
 3725         return (CAM_REQ_CMP);
 3726 }
 3727 
 3728 static path_id_t
 3729 xptnextfreepathid(void)
 3730 {
 3731         struct cam_eb *bus;
 3732         path_id_t pathid;
 3733         const char *strval;
 3734 
 3735         pathid = 0;
 3736         mtx_lock(&xsoftc.xpt_topo_lock);
 3737         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3738 retry:
 3739         /* Find an unoccupied pathid */
 3740         while (bus != NULL && bus->path_id <= pathid) {
 3741                 if (bus->path_id == pathid)
 3742                         pathid++;
 3743                 bus = TAILQ_NEXT(bus, links);
 3744         }
 3745         mtx_unlock(&xsoftc.xpt_topo_lock);
 3746 
 3747         /*
 3748          * Ensure that this pathid is not reserved for
 3749          * a bus that may be registered in the future.
 3750          */
 3751         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 3752                 ++pathid;
 3753                 /* Start the search over */
 3754                 mtx_lock(&xsoftc.xpt_topo_lock);
 3755                 goto retry;
 3756         }
 3757         return (pathid);
 3758 }
 3759 
 3760 static path_id_t
 3761 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 3762 {
 3763         path_id_t pathid;
 3764         int i, dunit, val;
 3765         char buf[32];
 3766         const char *dname;
 3767 
 3768         pathid = CAM_XPT_PATH_ID;
 3769         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 3770         i = 0;
 3771         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 3772                 if (strcmp(dname, "scbus")) {
 3773                         /* Avoid a bit of foot shooting. */
 3774                         continue;
 3775                 }
 3776                 if (dunit < 0)          /* unwired?! */
 3777                         continue;
 3778                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 3779                         if (sim_bus == val) {
 3780                                 pathid = dunit;
 3781                                 break;
 3782                         }
 3783                 } else if (sim_bus == 0) {
 3784                         /* Unspecified matches bus 0 */
 3785                         pathid = dunit;
 3786                         break;
 3787                 } else {
 3788                         printf("Ambiguous scbus configuration for %s%d "
 3789                                "bus %d, cannot wire down.  The kernel "
 3790                                "config entry for scbus%d should "
 3791                                "specify a controller bus.\n"
 3792                                "Scbus will be assigned dynamically.\n",
 3793                                sim_name, sim_unit, sim_bus, dunit);
 3794                         break;
 3795                 }
 3796         }
 3797 
 3798         if (pathid == CAM_XPT_PATH_ID)
 3799                 pathid = xptnextfreepathid();
 3800         return (pathid);
 3801 }
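
/*
 * The "scbus" hints consulted above come from the kernel configuration
 * or /boot/device.hints.  For example (controller name assumed):
 *
 *      hint.scbus.0.at="ahc0"
 *      hint.scbus.0.bus="0"
 *
 * wires path id 0 to bus 0 of ahc0; xptnextfreepathid() skips any path
 * id reserved this way.
 */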
 3802 
 3803 void
 3804 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 3805 {
 3806         struct cam_eb *bus;
 3807         struct cam_et *target, *next_target;
 3808         struct cam_ed *device, *next_device;
 3809 
 3810         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3811 
 3812         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
 3813 
 3814         /*
 3815          * Most async events come from a CAM interrupt context.  In
 3816          * a few cases, the error recovery code at the peripheral layer,
 3817          * which may run from our SWI or a process context, may signal
 3818          * deferred events with a call to xpt_async.
 3819          */
 3820 
 3821         bus = path->bus;
 3822 
 3823         if (async_code == AC_BUS_RESET) {
 3824                 /* Update our notion of when the last reset occurred */
 3825                 microtime(&bus->last_reset);
 3826         }
 3827 
 3828         for (target = TAILQ_FIRST(&bus->et_entries);
 3829              target != NULL;
 3830              target = next_target) {
 3831 
 3832                 next_target = TAILQ_NEXT(target, links);
 3833 
 3834                 if (path->target != target
 3835                  && path->target->target_id != CAM_TARGET_WILDCARD
 3836                  && target->target_id != CAM_TARGET_WILDCARD)
 3837                         continue;
 3838 
 3839                 if (async_code == AC_SENT_BDR) {
 3840                         /* Update our notion of when the last reset occurred */
 3841                         microtime(&path->target->last_reset);
 3842                 }
 3843 
 3844                 for (device = TAILQ_FIRST(&target->ed_entries);
 3845                      device != NULL;
 3846                      device = next_device) {
 3847 
 3848                         next_device = TAILQ_NEXT(device, links);
 3849 
 3850                         if (path->device != device
 3851                          && path->device->lun_id != CAM_LUN_WILDCARD
 3852                          && device->lun_id != CAM_LUN_WILDCARD)
 3853                                 continue;
 3854                         /*
 3855                          * The async callback could free the device.
 3856                          * If it is a broadcast async, it doesn't hold
 3857                          * device reference, so take our own reference.
 3858                          */
 3859                         xpt_acquire_device(device);
 3860                         (*(bus->xport->async))(async_code, bus,
 3861                                                target, device,
 3862                                                async_arg);
 3863 
 3864                         xpt_async_bcast(&device->asyncs, async_code,
 3865                                         path, async_arg);
 3866                         xpt_release_device(device);
 3867                 }
 3868         }
 3869 
 3870         /*
 3871          * If this wasn't a fully wildcarded async, tell all
 3872          * clients that want all async events.
 3873          */
 3874         if (bus != xpt_periph->path->bus)
 3875                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 3876                                 path, async_arg);
 3877 }
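
/*
 * For example, a SIM that has just reset its bus would typically post,
 * with its lock held and "path" referring to that bus:
 *
 *      xpt_async(AC_BUS_RESET, path, NULL);
 *
 * which updates the bus's last_reset time above and runs every matching
 * registered callback.
 */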
 3878 
 3879 static void
 3880 xpt_async_bcast(struct async_list *async_head,
 3881                 u_int32_t async_code,
 3882                 struct cam_path *path, void *async_arg)
 3883 {
 3884         struct async_node *cur_entry;
 3885 
 3886         cur_entry = SLIST_FIRST(async_head);
 3887         while (cur_entry != NULL) {
 3888                 struct async_node *next_entry;
 3889                 /*
 3890                  * Grab the next list entry before we call the current
 3891                  * entry's callback.  This is because the callback function
 3892                  * can delete its async callback entry.
 3893                  */
 3894                 next_entry = SLIST_NEXT(cur_entry, links);
 3895                 if ((cur_entry->event_enable & async_code) != 0)
 3896                         cur_entry->callback(cur_entry->callback_arg,
 3897                                             async_code, path,
 3898                                             async_arg);
 3899                 cur_entry = next_entry;
 3900         }
 3901 }
 3902 
 3903 static void
 3904 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
 3905                       struct cam_et *target, struct cam_ed *device,
 3906                       void *async_arg)
 3907 {
 3908         printf("%s called\n", __func__);
 3909 }
 3910 
 3911 u_int32_t
 3912 xpt_freeze_devq_rl(struct cam_path *path, cam_rl rl, u_int count)
 3913 {
 3914         struct cam_ed *dev = path->device;
 3915 
 3916         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3917         dev->sim->devq->alloc_openings +=
 3918             cam_ccbq_freeze(&dev->ccbq, rl, count);
 3919         /* Remove frozen device from allocq. */
 3920         if (device_is_alloc_queued(dev) &&
 3921             cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
 3922              CAMQ_GET_PRIO(&dev->drvq)))) {
 3923                 camq_remove(&dev->sim->devq->alloc_queue,
 3924                     dev->alloc_ccb_entry.pinfo.index);
 3925         }
 3926         /* Remove frozen device from sendq. */
 3927         if (device_is_send_queued(dev) &&
 3928             cam_ccbq_frozen_top(&dev->ccbq)) {
 3929                 camq_remove(&dev->sim->devq->send_queue,
 3930                     dev->send_ccb_entry.pinfo.index);
 3931         }
 3932         return (dev->ccbq.queue.qfrozen_cnt[rl]);
 3933 }
 3934 
 3935 u_int32_t
 3936 xpt_freeze_devq(struct cam_path *path, u_int count)
 3937 {
 3938 
 3939         return (xpt_freeze_devq_rl(path, 0, count));
 3940 }
 3941 
 3942 u_int32_t
 3943 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 3944 {
 3945 
 3946         mtx_assert(sim->mtx, MA_OWNED);
 3947         sim->devq->send_queue.qfrozen_cnt[0] += count;
 3948         return (sim->devq->send_queue.qfrozen_cnt[0]);
 3949 }
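
/*
 * Usage note (editorial, hedged): xpt_freeze_simq() and
 * xpt_release_simq() form a nesting pair.  A SIM that must quiesce
 * I/O, e.g. around a controller reset, would typically do:
 *
 *      xpt_freeze_simq(sim, 1);
 *      ... reset the hardware ...
 *      xpt_release_simq(sim, TRUE);
 *
 * Each freeze bumps qfrozen_cnt[0] by count and must be matched by a
 * release before the send queue runs again.
 */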
 3950 
 3951 static void
 3952 xpt_release_devq_timeout(void *arg)
 3953 {
 3954         struct cam_ed *device;
 3955 
 3956         device = (struct cam_ed *)arg;
 3957 
 3958         xpt_release_devq_device(device, /*rl*/0, /*count*/1, /*run_queue*/TRUE);
 3959 }
 3960 
 3961 void
 3962 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 3963 {
 3964         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3965 
 3966         xpt_release_devq_device(path->device, /*rl*/0, count, run_queue);
 3967 }
 3968 
 3969 void
 3970 xpt_release_devq_rl(struct cam_path *path, cam_rl rl, u_int count, int run_queue)
 3971 {
 3972         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3973 
 3974         xpt_release_devq_device(path->device, rl, count, run_queue);
 3975 }
 3976 
 3977 static void
 3978 xpt_release_devq_device(struct cam_ed *dev, cam_rl rl, u_int count, int run_queue)
 3979 {
 3980 
 3981         if (count > dev->ccbq.queue.qfrozen_cnt[rl]) {
 3982 #ifdef INVARIANTS
 3983                 printf("xpt_release_devq(%d): requested %u > present %u\n",
 3984                     rl, count, dev->ccbq.queue.qfrozen_cnt[rl]);
 3985 #endif
 3986                 count = dev->ccbq.queue.qfrozen_cnt[rl];
 3987         }
 3988         dev->sim->devq->alloc_openings -=
 3989             cam_ccbq_release(&dev->ccbq, rl, count);
 3990         if (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
 3991             CAMQ_GET_PRIO(&dev->drvq))) == 0) {
 3992                 if (xpt_schedule_dev_allocq(dev->target->bus, dev))
 3993                         xpt_run_dev_allocq(dev->target->bus);
 3994         }
 3995         if (cam_ccbq_frozen_top(&dev->ccbq) == 0) {
 3996                 /*
 3997                  * No longer need to wait for a successful
 3998                  * command completion.
 3999                  */
 4000                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4001                 /*
 4002                  * Remove any timeouts that might be scheduled
 4003                  * to release this queue.
 4004                  */
 4005                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4006                         callout_stop(&dev->callout);
 4007                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4008                 }
 4009                 if (run_queue == 0)
 4010                         return;
 4011                 /*
 4012                  * Now that we are unfrozen, schedule the
 4013                  * device so any pending transactions are
 4014                  * run.
 4015                  */
 4016                 if (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4017                         xpt_run_dev_sendq(dev->target->bus);
 4018         }
 4019 }
 4020 
 4021 void
 4022 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4023 {
 4024         struct  camq *sendq;
 4025 
 4026         mtx_assert(sim->mtx, MA_OWNED);
 4027         sendq = &(sim->devq->send_queue);
 4028         if (sendq->qfrozen_cnt[0] <= 0) {
 4029 #ifdef INVARIANTS
 4030                 printf("xpt_release_simq: requested 1 > present %u\n",
 4031                     sendq->qfrozen_cnt[0]);
 4032 #endif
 4033         } else
 4034                 sendq->qfrozen_cnt[0]--;
 4035         if (sendq->qfrozen_cnt[0] == 0) {
 4036                 /*
 4037                  * If there is a timeout scheduled to release this
 4038                  * sim queue, remove it.  The queue frozen count is
 4039                  * already at 0.
 4040                  */
 4041                 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0) {
 4042                         callout_stop(&sim->callout);
 4043                         sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4044                 }
 4045                 if (run_queue) {
 4046                         struct cam_eb *bus;
 4047 
 4048                         /*
 4049                          * Now that we are unfrozen, run the send queue.
 4050                          */
 4051                         bus = xpt_find_bus(sim->path_id);
 4052                         xpt_run_dev_sendq(bus);
 4053                         xpt_release_bus(bus);
 4054                 }
 4055         }
 4056 }
 4057 
 4058 /*
 4059  * XXX Appears to be unused.
 4060  */
 4061 static void
 4062 xpt_release_simq_timeout(void *arg)
 4063 {
 4064         struct cam_sim *sim;
 4065 
 4066         sim = (struct cam_sim *)arg;
 4067         xpt_release_simq(sim, /* run_queue */ TRUE);
 4068 }
 4069 
 4070 void
 4071 xpt_done(union ccb *done_ccb)
 4072 {
 4073         struct cam_sim *sim;
 4074         int     first;
 4075 
 4076         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4077         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4078                 /*
 4079                  * Queue up the request for handling by our SWI handler;
 4080                  * this is done for any of the "non-immediate" type of ccbs.
 4081                  */
 4082                 sim = done_ccb->ccb_h.path->bus->sim;
 4083                 TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
 4084                     sim_links.tqe);
 4085                 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4086                 if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
 4087                         mtx_lock(&cam_simq_lock);
 4088                         first = TAILQ_EMPTY(&cam_simq);
 4089                         TAILQ_INSERT_TAIL(&cam_simq, sim, links);
 4090                         mtx_unlock(&cam_simq_lock);
 4091                         sim->flags |= CAM_SIM_ON_DONEQ;
 4092                         if (first)
 4093                                 swi_sched(cambio_ih, 0);
 4094                 }
 4095         }
 4096 }
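
/*
 * Editorial sketch (hypothetical driver code, not in this file): a SIM
 * finishes a request by filling in the CCB status and handing it back
 * through xpt_done() with the SIM lock held; the SWI scheduled above
 * then delivers it via camisr_runqueue() later in this file.
 */
#if 0
static void
foo_complete(union ccb *ccb)
{
        ccb->ccb_h.status = CAM_REQ_CMP;
        xpt_done(ccb);
}
#endif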
 4097 
 4098 union ccb *
 4099 xpt_alloc_ccb()
 4100 {
 4101         union ccb *new_ccb;
 4102 
 4103         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK);
 4104         return (new_ccb);
 4105 }
 4106 
 4107 union ccb *
 4108 xpt_alloc_ccb_nowait()
 4109 {
 4110         union ccb *new_ccb;
 4111 
 4112         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT);
 4113         return (new_ccb);
 4114 }
 4115 
 4116 void
 4117 xpt_free_ccb(union ccb *free_ccb)
 4118 {
 4119         free(free_ccb, M_CAMXPT);
 4120 }
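
/*
 * Editorial sketch (hypothetical, not in this file): CCBs obtained from
 * the allocators above are returned with xpt_free_ccb().  Note that
 * xpt_alloc_ccb() may sleep (M_WAITOK); callers that cannot sleep must
 * use xpt_alloc_ccb_nowait() and check for NULL.
 */
#if 0
static void
foo_use_ccb(struct cam_path *path)
{
        union ccb *ccb;

        ccb = xpt_alloc_ccb();  /* may sleep */
        xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NORMAL);
        /* ... fill in and dispatch the request via xpt_action() ... */
        xpt_free_ccb(ccb);
}
#endif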
 4121 
 4122 
 4123 
 4124 /* Private XPT functions */
 4125 
 4126 /*
 4127  * Get a CAM control block for the caller. Charge the structure to the device
 4128          * referenced by the path.  If this device has no 'credits' then the
 4129  * device already has the maximum number of outstanding operations under way
 4130  * and we return NULL. If we don't have sufficient resources to allocate more
 4131  * ccbs, we also return NULL.
 4132  */
 4133 static union ccb *
 4134 xpt_get_ccb(struct cam_ed *device)
 4135 {
 4136         union ccb *new_ccb;
 4137         struct cam_sim *sim;
 4138 
 4139         sim = device->sim;
 4140         if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
 4141                 new_ccb = xpt_alloc_ccb_nowait();
 4142                 if (new_ccb == NULL) {
 4143                         return (NULL);
 4144                 }
 4145                 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
 4146                         callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 4147                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
 4148                                   xpt_links.sle);
 4149                 sim->ccb_count++;
 4150         }
 4151         cam_ccbq_take_opening(&device->ccbq);
 4152         SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
 4153         return (new_ccb);
 4154 }
 4155 
 4156 static void
 4157 xpt_release_bus(struct cam_eb *bus)
 4158 {
 4159 
 4160         if ((--bus->refcount == 0)
 4161          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
 4162                 mtx_lock(&xsoftc.xpt_topo_lock);
 4163                 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
 4164                 xsoftc.bus_generation++;
 4165                 mtx_unlock(&xsoftc.xpt_topo_lock);
 4166                 cam_sim_release(bus->sim);
 4167                 free(bus, M_CAMXPT);
 4168         }
 4169 }
 4170 
 4171 static struct cam_et *
 4172 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4173 {
 4174         struct cam_et *target;
 4175 
 4176         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
 4177         if (target != NULL) {
 4178                 struct cam_et *cur_target;
 4179 
 4180                 TAILQ_INIT(&target->ed_entries);
 4181                 target->bus = bus;
 4182                 target->target_id = target_id;
 4183                 target->refcount = 1;
 4184                 target->generation = 0;
 4185                 timevalclear(&target->last_reset);
 4186                 /*
 4187                  * Hold a reference to our parent bus so it
 4188                  * will not go away before we do.
 4189                  */
 4190                 bus->refcount++;
 4191 
 4192                 /* Insertion sort into our bus's target list */
 4193                 cur_target = TAILQ_FIRST(&bus->et_entries);
 4194                 while (cur_target != NULL && cur_target->target_id < target_id)
 4195                         cur_target = TAILQ_NEXT(cur_target, links);
 4196 
 4197                 if (cur_target != NULL) {
 4198                         TAILQ_INSERT_BEFORE(cur_target, target, links);
 4199                 } else {
 4200                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4201                 }
 4202                 bus->generation++;
 4203         }
 4204         return (target);
 4205 }
 4206 
 4207 static void
 4208 xpt_release_target(struct cam_et *target)
 4209 {
 4210 
 4211         if ((--target->refcount == 0)
 4212          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
 4213                 TAILQ_REMOVE(&target->bus->et_entries, target, links);
 4214                 target->bus->generation++;
 4215                 xpt_release_bus(target->bus);
 4216                 free(target, M_CAMXPT);
 4217         }
 4218 }
 4219 
 4220 static struct cam_ed *
 4221 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
 4222                          lun_id_t lun_id)
 4223 {
 4224         struct cam_ed *device, *cur_device;
 4225 
 4226         device = xpt_alloc_device(bus, target, lun_id);
 4227         if (device == NULL)
 4228                 return (NULL);
 4229 
 4230         device->mintags = 1;
 4231         device->maxtags = 1;
 4232         bus->sim->max_ccbs += device->ccbq.devq_openings;
 4233         cur_device = TAILQ_FIRST(&target->ed_entries);
 4234         while (cur_device != NULL && cur_device->lun_id < lun_id)
 4235                 cur_device = TAILQ_NEXT(cur_device, links);
 4236         if (cur_device != NULL) {
 4237                 TAILQ_INSERT_BEFORE(cur_device, device, links);
 4238         } else {
 4239                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 4240         }
 4241         target->generation++;
 4242 
 4243         return (device);
 4244 }
 4245 
 4246 struct cam_ed *
 4247 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4248 {
 4249         struct     cam_ed *device;
 4250         struct     cam_devq *devq;
 4251         cam_status status;
 4252 
 4253         /* Make space for us in the device queue on our bus */
 4254         devq = bus->sim->devq;
 4255         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 4256 
 4257         if (status != CAM_REQ_CMP) {
 4258                 device = NULL;
 4259         } else {
 4260                 device = (struct cam_ed *)malloc(sizeof(*device),
 4261                                                  M_CAMXPT, M_NOWAIT);
 4262         }
 4263 
 4264         if (device != NULL) {
 4265                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 4266                 device->alloc_ccb_entry.device = device;
 4267                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
 4268                 device->send_ccb_entry.device = device;
 4269                 device->target = target;
 4270                 device->lun_id = lun_id;
 4271                 device->sim = bus->sim;
 4272                 /* Initialize our queues */
 4273                 if (camq_init(&device->drvq, 0) != 0) {
 4274                         free(device, M_CAMXPT);
 4275                         return (NULL);
 4276                 }
 4277                 if (cam_ccbq_init(&device->ccbq,
 4278                                   bus->sim->max_dev_openings) != 0) {
 4279                         camq_fini(&device->drvq);
 4280                         free(device, M_CAMXPT);
 4281                         return (NULL);
 4282                 }
 4283                 SLIST_INIT(&device->asyncs);
 4284                 SLIST_INIT(&device->periphs);
 4285                 device->generation = 0;
 4286                 device->owner = NULL;
 4287                 device->flags = CAM_DEV_UNCONFIGURED;
 4288                 device->tag_delay_count = 0;
 4289                 device->tag_saved_openings = 0;
 4290                 device->refcount = 1;
 4291                 callout_init_mtx(&device->callout, bus->sim->mtx, 0);
 4292 
 4293                 /*
 4294                  * Hold a reference to our parent target so it
 4295                  * will not go away before we do.
 4296                  */
 4297                 target->refcount++;
 4298 
 4299         }
 4300         return (device);
 4301 }
 4302 
 4303 void
 4304 xpt_acquire_device(struct cam_ed *device)
 4305 {
 4306 
 4307         device->refcount++;
 4308 }
 4309 
 4310 void
 4311 xpt_release_device(struct cam_ed *device)
 4312 {
 4313 
 4314         if (--device->refcount == 0) {
 4315                 struct cam_devq *devq;
 4316 
 4317                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 4318                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 4319                         panic("Removing device while still queued for ccbs");
 4320 
 4321                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 4322                                 callout_stop(&device->callout);
 4323 
 4324                 TAILQ_REMOVE(&device->target->ed_entries, device, links);
 4325                 device->target->generation++;
 4326                 device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings;
 4327                 /* Release our slot in the devq */
 4328                 devq = device->target->bus->sim->devq;
 4329                 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 4330                 camq_fini(&device->drvq);
 4331                 cam_ccbq_fini(&device->ccbq);
 4332                 xpt_release_target(device->target);
 4333                 free(device, M_CAMXPT);
 4334         }
 4335 }
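
/*
 * Editorial note: xpt_acquire_device() and xpt_release_device() form a
 * reference-counting pair.  Code that may outlive its caller's
 * reference, as the async broadcast path earlier in this file does
 * around the transport callback, takes its own reference and drops it
 * when finished; the final release frees the device.
 */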
 4336 
 4337 u_int32_t
 4338 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 4339 {
 4340         int     diff;
 4341         int     result;
 4342         struct  cam_ed *dev;
 4343 
 4344         dev = path->device;
 4345 
 4346         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 4347         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 4348         if (result == CAM_REQ_CMP && (diff < 0)) {
 4349                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 4350         }
 4351         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4352          || (dev->inq_flags & SID_CmdQue) != 0)
 4353                 dev->tag_saved_openings = newopenings;
 4354         /* Adjust the global limit */
 4355         dev->sim->max_ccbs += diff;
 4356         return (result);
 4357 }
 4358 
 4359 static struct cam_eb *
 4360 xpt_find_bus(path_id_t path_id)
 4361 {
 4362         struct cam_eb *bus;
 4363 
 4364         mtx_lock(&xsoftc.xpt_topo_lock);
 4365         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4366              bus != NULL;
 4367              bus = TAILQ_NEXT(bus, links)) {
 4368                 if (bus->path_id == path_id) {
 4369                         bus->refcount++;
 4370                         break;
 4371                 }
 4372         }
 4373         mtx_unlock(&xsoftc.xpt_topo_lock);
 4374         return (bus);
 4375 }
 4376 
 4377 static struct cam_et *
 4378 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 4379 {
 4380         struct cam_et *target;
 4381 
 4382         for (target = TAILQ_FIRST(&bus->et_entries);
 4383              target != NULL;
 4384              target = TAILQ_NEXT(target, links)) {
 4385                 if (target->target_id == target_id) {
 4386                         target->refcount++;
 4387                         break;
 4388                 }
 4389         }
 4390         return (target);
 4391 }
 4392 
 4393 static struct cam_ed *
 4394 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 4395 {
 4396         struct cam_ed *device;
 4397 
 4398         for (device = TAILQ_FIRST(&target->ed_entries);
 4399              device != NULL;
 4400              device = TAILQ_NEXT(device, links)) {
 4401                 if (device->lun_id == lun_id) {
 4402                         device->refcount++;
 4403                         break;
 4404                 }
 4405         }
 4406         return (device);
 4407 }
 4408 
 4409 void
 4410 xpt_start_tags(struct cam_path *path)
 4411 {
 4412         struct ccb_relsim crs;
 4413         struct cam_ed *device;
 4414         struct cam_sim *sim;
 4415         int    newopenings;
 4416 
 4417         device = path->device;
 4418         sim = path->bus->sim;
 4419         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4420         xpt_freeze_devq(path, /*count*/1);
 4421         device->inq_flags |= SID_CmdQue;
 4422         if (device->tag_saved_openings != 0)
 4423                 newopenings = device->tag_saved_openings;
 4424         else
 4425                 newopenings = min(device->maxtags,
 4426                                   sim->max_tagged_dev_openings);
 4427         xpt_dev_ccbq_resize(path, newopenings);
 4428         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4429         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4430         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4431         crs.openings
 4432             = crs.release_timeout
 4433             = crs.qfrozen_cnt
 4434             = 0;
 4435         xpt_action((union ccb *)&crs);
 4436 }
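
/*
 * Editorial note (a hedged reading of the code above): the
 * xpt_freeze_devq() call plus RELSIM_RELEASE_AFTER_QEMPTY appears to
 * hold new I/O off until the commands already outstanding drain, so
 * the switch to (or, in xpt_stop_tags() below, away from) tagged
 * queuing never mixes with in-flight commands issued under the old
 * setting.
 */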
 4437 
 4438 void
 4439 xpt_stop_tags(struct cam_path *path)
 4440 {
 4441         struct ccb_relsim crs;
 4442         struct cam_ed *device;
 4443         struct cam_sim *sim;
 4444 
 4445         device = path->device;
 4446         sim = path->bus->sim;
 4447         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4448         device->tag_delay_count = 0;
 4449         xpt_freeze_devq(path, /*count*/1);
 4450         device->inq_flags &= ~SID_CmdQue;
 4451         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
 4452         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4453         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4454         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4455         crs.openings
 4456             = crs.release_timeout
 4457             = crs.qfrozen_cnt
 4458             = 0;
 4459         xpt_action((union ccb *)&crs);
 4460 }
 4461 
 4462 static void
 4463 xpt_boot_delay(void *arg)
 4464 {
 4465 
 4466         xpt_release_boot();
 4467 }
 4468 
 4469 static void
 4470 xpt_config(void *arg)
 4471 {
 4472         /*
 4473          * Now that interrupts are enabled, go find our devices
 4474          */
 4475 
 4476 #ifdef CAMDEBUG
 4477         /* Setup debugging flags and path */
 4478 #ifdef CAM_DEBUG_FLAGS
 4479         cam_dflags = CAM_DEBUG_FLAGS;
 4480 #else /* !CAM_DEBUG_FLAGS */
 4481         cam_dflags = CAM_DEBUG_NONE;
 4482 #endif /* CAM_DEBUG_FLAGS */
 4483 #ifdef CAM_DEBUG_BUS
 4484         if (cam_dflags != CAM_DEBUG_NONE) {
 4485                 /*
 4486                  * Locking is specifically omitted here.  No SIMs have
 4487                  * registered yet, so xpt_create_path will only be searching
 4488                  * empty lists of targets and devices.
 4489                  */
 4490                 if (xpt_create_path(&cam_dpath, xpt_periph,
 4491                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 4492                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 4493                         printf("xpt_config: xpt_create_path() failed for debug"
 4494                                " target %d:%d:%d, debugging disabled\n",
 4495                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 4496                         cam_dflags = CAM_DEBUG_NONE;
 4497                 }
 4498         } else
 4499                 cam_dpath = NULL;
 4500 #else /* !CAM_DEBUG_BUS */
 4501         cam_dpath = NULL;
 4502 #endif /* CAM_DEBUG_BUS */
 4503 #endif /* CAMDEBUG */
 4504 
 4505         periphdriver_init(1);
 4506         xpt_hold_boot();
 4507         callout_init(&xsoftc.boot_callout, 1);
 4508         callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
 4509             xpt_boot_delay, NULL);
 4510         /* Fire up rescan thread. */
 4511         if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
 4512                 printf("xpt_init: failed to create rescan thread\n");
 4513         }
 4514 }
 4515 
 4516 void
 4517 xpt_hold_boot(void)
 4518 {
 4519         xpt_lock_buses();
 4520         xsoftc.buses_to_config++;
 4521         xpt_unlock_buses();
 4522 }
 4523 
 4524 void
 4525 xpt_release_boot(void)
 4526 {
 4527         xpt_lock_buses();
 4528         xsoftc.buses_to_config--;
 4529         if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
 4530                 struct  xpt_task *task;
 4531 
 4532                 xsoftc.buses_config_done = 1;
 4533                 xpt_unlock_buses();
 4534                 /* Call manually because we don't have any busses */
 4535                 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
 4536                 if (task != NULL) {
 4537                         TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
 4538                         taskqueue_enqueue(taskqueue_thread, &task->task);
 4539                 }
 4540         } else
 4541                 xpt_unlock_buses();
 4542 }
 4543 
 4544 /*
 4545  * If the given device only has one peripheral attached to it, and if that
 4546  * peripheral is the passthrough driver, announce it.  This ensures that the
 4547  * user sees some sort of announcement for every peripheral in their system.
 4548  */
 4549 static int
 4550 xptpassannouncefunc(struct cam_ed *device, void *arg)
 4551 {
 4552         struct cam_periph *periph;
 4553         int i;
 4554 
 4555         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 4556              periph = SLIST_NEXT(periph, periph_links), i++);
 4557 
 4558         periph = SLIST_FIRST(&device->periphs);
 4559         if ((i == 1)
 4560          && (strncmp(periph->periph_name, "pass", 4) == 0))
 4561                 xpt_announce_periph(periph, NULL);
 4562 
 4563         return(1);
 4564 }
 4565 
 4566 static void
 4567 xpt_finishconfig_task(void *context, int pending)
 4568 {
 4569 
 4570         periphdriver_init(2);
 4571         /*
 4572          * Check for devices with no "standard" peripheral driver
 4573          * attached.  For any devices like that, announce the
 4574          * passthrough driver so the user will see something.
 4575          */
 4576         xpt_for_all_devices(xptpassannouncefunc, NULL);
 4577 
 4578         /* Release our hook so that the boot can continue. */
 4579         config_intrhook_disestablish(xsoftc.xpt_config_hook);
 4580         free(xsoftc.xpt_config_hook, M_CAMXPT);
 4581         xsoftc.xpt_config_hook = NULL;
 4582 
 4583         free(context, M_CAMXPT);
 4584 }
 4585 
 4586 cam_status
 4587 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
 4588                    struct cam_path *path)
 4589 {
 4590         struct ccb_setasync csa;
 4591         cam_status status;
 4592         int xptpath = 0;
 4593 
 4594         if (path == NULL) {
 4595                 mtx_lock(&xsoftc.xpt_lock);
 4596                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
 4597                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4598                 if (status != CAM_REQ_CMP) {
 4599                         mtx_unlock(&xsoftc.xpt_lock);
 4600                         return (status);
 4601                 }
 4602                 xptpath = 1;
 4603         }
 4604 
 4605         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 4606         csa.ccb_h.func_code = XPT_SASYNC_CB;
 4607         csa.event_enable = event;
 4608         csa.callback = cbfunc;
 4609         csa.callback_arg = cbarg;
 4610         xpt_action((union ccb *)&csa);
 4611         status = csa.ccb_h.status;
 4612         if (xptpath) {
 4613                 xpt_free_path(path);
 4614                 mtx_unlock(&xsoftc.xpt_lock);
 4615 
 4616                 if ((status == CAM_REQ_CMP) &&
 4617                     (csa.event_enable & AC_FOUND_DEVICE)) {
 4618                         /*
 4619                          * Get this peripheral up to date with all
 4620                          * the currently existing devices.
 4621                          */
 4622                         xpt_for_all_devices(xptsetasyncfunc, &csa);
 4623                 }
 4624                 if ((status == CAM_REQ_CMP) &&
 4625                     (csa.event_enable & AC_PATH_REGISTERED)) {
 4626                         /*
 4627                          * Get this peripheral up to date with all
 4628                          * the currently existing busses.
 4629                          */
 4630                         xpt_for_all_busses(xptsetasyncbusfunc, &csa);
 4631                 }
 4632         }
 4633         return (status);
 4634 }
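
/*
 * Editorial sketch (hypothetical, not in this file): a peripheral
 * driver's init routine commonly registers for new-device
 * announcements with a NULL path, exactly the case the function above
 * special-cases with a wildcard XPT path.  "fooinit" and "fooasync"
 * are placeholder names.
 */
#if 0
static void
fooinit(void)
{
        cam_status status;

        status = xpt_register_async(AC_FOUND_DEVICE, fooasync,
                                    NULL, NULL);
        if (status != CAM_REQ_CMP)
                printf("foo: failed to register async callback\n");
}
#endif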
 4635 
 4636 static void
 4637 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 4638 {
 4639         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 4640 
 4641         switch (work_ccb->ccb_h.func_code) {
 4642         /* Common cases first */
 4643         case XPT_PATH_INQ:              /* Path routing inquiry */
 4644         {
 4645                 struct ccb_pathinq *cpi;
 4646 
 4647                 cpi = &work_ccb->cpi;
 4648                 cpi->version_num = 1; /* XXX??? */
 4649                 cpi->hba_inquiry = 0;
 4650                 cpi->target_sprt = 0;
 4651                 cpi->hba_misc = 0;
 4652                 cpi->hba_eng_cnt = 0;
 4653                 cpi->max_target = 0;
 4654                 cpi->max_lun = 0;
 4655                 cpi->initiator_id = 0;
 4656                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 4657                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
 4658                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 4659                 cpi->unit_number = sim->unit_number;
 4660                 cpi->bus_id = sim->bus_id;
 4661                 cpi->base_transfer_speed = 0;
 4662                 cpi->protocol = PROTO_UNSPECIFIED;
 4663                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
 4664                 cpi->transport = XPORT_UNSPECIFIED;
 4665                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
 4666                 cpi->ccb_h.status = CAM_REQ_CMP;
 4667                 xpt_done(work_ccb);
 4668                 break;
 4669         }
 4670         default:
 4671                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 4672                 xpt_done(work_ccb);
 4673                 break;
 4674         }
 4675 }
 4676 
 4677 /*
 4678  * The xpt as a "controller" has no interrupt sources, so polling
 4679  * is a no-op.
 4680  */
 4681 static void
 4682 xptpoll(struct cam_sim *sim)
 4683 {
 4684 }
 4685 
 4686 void
 4687 xpt_lock_buses(void)
 4688 {
 4689         mtx_lock(&xsoftc.xpt_topo_lock);
 4690 }
 4691 
 4692 void
 4693 xpt_unlock_buses(void)
 4694 {
 4695         mtx_unlock(&xsoftc.xpt_topo_lock);
 4696 }
 4697 
 4698 static void
 4699 camisr(void *dummy)
 4700 {
 4701         cam_simq_t queue;
 4702         struct cam_sim *sim;
 4703 
 4704         mtx_lock(&cam_simq_lock);
 4705         TAILQ_INIT(&queue);
 4706         while (!TAILQ_EMPTY(&cam_simq)) {
 4707                 TAILQ_CONCAT(&queue, &cam_simq, links);
 4708                 mtx_unlock(&cam_simq_lock);
 4709 
 4710                 while ((sim = TAILQ_FIRST(&queue)) != NULL) {
 4711                         TAILQ_REMOVE(&queue, sim, links);
 4712                         CAM_SIM_LOCK(sim);
 4713                         sim->flags &= ~CAM_SIM_ON_DONEQ;
 4714                         camisr_runqueue(&sim->sim_doneq);
 4715                         CAM_SIM_UNLOCK(sim);
 4716                 }
 4717                 mtx_lock(&cam_simq_lock);
 4718         }
 4719         mtx_unlock(&cam_simq_lock);
 4720 }
 4721 
 4722 static void
 4723 camisr_runqueue(void *V_queue)
 4724 {
 4725         cam_isrq_t *queue = V_queue;
 4726         struct  ccb_hdr *ccb_h;
 4727 
 4728         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
 4729                 int     runq;
 4730 
 4731                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
 4732                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 4733 
 4734                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
 4735                           ("camisr\n"));
 4736 
 4737                 runq = FALSE;
 4738 
 4739                 if (ccb_h->flags & CAM_HIGH_POWER) {
 4740                         struct highpowerlist    *hphead;
 4741                         union ccb               *send_ccb;
 4742 
 4743                         mtx_lock(&xsoftc.xpt_lock);
 4744                         hphead = &xsoftc.highpowerq;
 4745 
 4746                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
 4747 
 4748                         /*
 4749                          * Increment the count since this command is done.
 4750                          */
 4751                         xsoftc.num_highpower++;
 4752 
 4753                         /*
 4754                          * Any high powered commands queued up?
 4755                          */
 4756                         if (send_ccb != NULL) {
 4757 
 4758                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
 4759                                 mtx_unlock(&xsoftc.xpt_lock);
 4760 
 4761                                 xpt_release_devq(send_ccb->ccb_h.path,
 4762                                                  /*count*/1, /*runqueue*/TRUE);
 4763                         } else
 4764                                 mtx_unlock(&xsoftc.xpt_lock);
 4765                 }
 4766 
 4767                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 4768                         struct cam_ed *dev;
 4769 
 4770                         dev = ccb_h->path->device;
 4771 
 4772                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 4773                         ccb_h->path->bus->sim->devq->send_active--;
 4774                         ccb_h->path->bus->sim->devq->send_openings++;
 4775                         runq = TRUE;
 4776 
 4777                         if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 4778                           && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
 4779                          || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 4780                           && (dev->ccbq.dev_active == 0))) {
 4781                                 xpt_release_devq(ccb_h->path, /*count*/1,
 4782                                                  /*run_queue*/FALSE);
 4783                         }
 4784 
 4785                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4786                          && (--dev->tag_delay_count == 0))
 4787                                 xpt_start_tags(ccb_h->path);
 4788                         if (!device_is_send_queued(dev))
 4789                                 xpt_schedule_dev_sendq(ccb_h->path->bus, dev);
 4790                 }
 4791 
 4792                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
 4793                         xpt_release_simq(ccb_h->path->bus->sim,
 4794                                          /*run_queue*/TRUE);
 4795                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
 4796                         runq = FALSE;
 4797                 }
 4798 
 4799                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 4800                  && (ccb_h->status & CAM_DEV_QFRZN)) {
 4801                         xpt_release_devq(ccb_h->path, /*count*/1,
 4802                                          /*run_queue*/TRUE);
 4803                         ccb_h->status &= ~CAM_DEV_QFRZN;
 4804                 } else if (runq) {
 4805                         xpt_run_dev_sendq(ccb_h->path->bus);
 4806                 }
 4807 
 4808                 /* Call the peripheral driver's callback */
 4809                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 4810         }
 4811 }
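
/*
 * Editorial summary of the completion path implemented above: a SIM
 * calls xpt_done() with its lock held; queued CCBs land on the SIM's
 * sim_doneq and the SIM itself on cam_simq, waking the cambio SWI.
 * camisr() drains cam_simq, and camisr_runqueue() then accounts for
 * queue slots, handles queue-release side effects, and finally calls
 * the peripheral's cbfcnp callback for each completed CCB.
 */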
