The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Implementation of the Common Access Method Transport (XPT) layer.
    3  *
    4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
    5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/8.1/sys/cam/cam_xpt.c 207774 2010-05-08 13:09:36Z mav $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/systm.h>
   36 #include <sys/types.h>
   37 #include <sys/malloc.h>
   38 #include <sys/kernel.h>
   39 #include <sys/time.h>
   40 #include <sys/conf.h>
   41 #include <sys/fcntl.h>
   42 #include <sys/md5.h>
   43 #include <sys/interrupt.h>
   44 #include <sys/sbuf.h>
   45 #include <sys/taskqueue.h>
   46 
   47 #include <sys/lock.h>
   48 #include <sys/mutex.h>
   49 #include <sys/sysctl.h>
   50 #include <sys/kthread.h>
   51 
   52 #ifdef PC98
   53 #include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
   54 #endif
   55 
   56 #include <cam/cam.h>
   57 #include <cam/cam_ccb.h>
   58 #include <cam/cam_periph.h>
   59 #include <cam/cam_queue.h>
   60 #include <cam/cam_sim.h>
   61 #include <cam/cam_xpt.h>
   62 #include <cam/cam_xpt_sim.h>
   63 #include <cam/cam_xpt_periph.h>
   64 #include <cam/cam_xpt_internal.h>
   65 #include <cam/cam_debug.h>
   66 
   67 #include <cam/scsi/scsi_all.h>
   68 #include <cam/scsi/scsi_message.h>
   69 #include <cam/scsi/scsi_pass.h>
   70 #include <machine/stdarg.h>     /* for xpt_print below */
   71 #include "opt_cam.h"
   72 
   73 /*
   74  * This is the maximum number of high powered commands (e.g. start unit)
   75  * that can be outstanding at a particular time.
   76  */
   77 #ifndef CAM_MAX_HIGHPOWER
   78 #define CAM_MAX_HIGHPOWER  4
   79 #endif
   80 
   81 /* Datastructures internal to the xpt layer */
   82 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
   83 
   84 /* Object for defering XPT actions to a taskqueue */
   85 struct xpt_task {
   86         struct task     task;
   87         void            *data1;
   88         uintptr_t       data2;
   89 };
   90 
   91 typedef enum {
   92         XPT_FLAG_OPEN           = 0x01
   93 } xpt_flags;
   94 
   95 struct xpt_softc {
   96         xpt_flags               flags;
   97         u_int32_t               xpt_generation;
   98 
   99         /* number of high powered commands that can go through right now */
  100         STAILQ_HEAD(highpowerlist, ccb_hdr)     highpowerq;
  101         int                     num_highpower;
  102 
  103         /* queue for handling async rescan requests. */
  104         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
  105         int buses_to_config;
  106         int buses_config_done;
  107 
  108         /* Registered busses */
  109         TAILQ_HEAD(,cam_eb)     xpt_busses;
  110         u_int                   bus_generation;
  111 
  112         struct intr_config_hook *xpt_config_hook;
  113 
  114         int                     boot_delay;
  115         struct callout          boot_callout;
  116 
  117         struct mtx              xpt_topo_lock;
  118         struct mtx              xpt_lock;
  119 };
  120 
  121 typedef enum {
  122         DM_RET_COPY             = 0x01,
  123         DM_RET_FLAG_MASK        = 0x0f,
  124         DM_RET_NONE             = 0x00,
  125         DM_RET_STOP             = 0x10,
  126         DM_RET_DESCEND          = 0x20,
  127         DM_RET_ERROR            = 0x30,
  128         DM_RET_ACTION_MASK      = 0xf0
  129 } dev_match_ret;
  130 
  131 typedef enum {
  132         XPT_DEPTH_BUS,
  133         XPT_DEPTH_TARGET,
  134         XPT_DEPTH_DEVICE,
  135         XPT_DEPTH_PERIPH
  136 } xpt_traverse_depth;
  137 
  138 struct xpt_traverse_config {
  139         xpt_traverse_depth      depth;
  140         void                    *tr_func;
  141         void                    *tr_arg;
  142 };
  143 
  144 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
  145 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
  146 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
  147 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
  148 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
  149 
  150 /* Transport layer configuration information */
  151 static struct xpt_softc xsoftc;
  152 
  153 TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
  154 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
  155            &xsoftc.boot_delay, 0, "Bus registration wait time");
  156 
  157 /* Queues for our software interrupt handler */
  158 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
  159 typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
  160 static cam_simq_t cam_simq;
  161 static struct mtx cam_simq_lock;
  162 
  163 /* Pointers to software interrupt handlers */
  164 static void *cambio_ih;
  165 
  166 struct cam_periph *xpt_periph;
  167 
  168 static periph_init_t xpt_periph_init;
  169 
  170 static struct periph_driver xpt_driver =
  171 {
  172         xpt_periph_init, "xpt",
  173         TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
  174         CAM_PERIPH_DRV_EARLY
  175 };
  176 
  177 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
  178 
  179 static d_open_t xptopen;
  180 static d_close_t xptclose;
  181 static d_ioctl_t xptioctl;
  182 
  183 static struct cdevsw xpt_cdevsw = {
  184         .d_version =    D_VERSION,
  185         .d_flags =      0,
  186         .d_open =       xptopen,
  187         .d_close =      xptclose,
  188         .d_ioctl =      xptioctl,
  189         .d_name =       "xpt",
  190 };
  191 
  192 /* Storage for debugging datastructures */
  193 #ifdef  CAMDEBUG
  194 struct cam_path *cam_dpath;
  195 u_int32_t cam_dflags;
  196 u_int32_t cam_debug_delay;
  197 #endif
  198 
  199 /* Our boot-time initialization hook */
  200 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
  201 
  202 static moduledata_t cam_moduledata = {
  203         "cam",
  204         cam_module_event_handler,
  205         NULL
  206 };
  207 
  208 static int      xpt_init(void *);
  209 
  210 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
  211 MODULE_VERSION(cam, 1);
  212 
  213 
  214 static void             xpt_async_bcast(struct async_list *async_head,
  215                                         u_int32_t async_code,
  216                                         struct cam_path *path,
  217                                         void *async_arg);
  218 static path_id_t xptnextfreepathid(void);
  219 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
  220 static union ccb *xpt_get_ccb(struct cam_ed *device);
  221 static void      xpt_run_dev_allocq(struct cam_eb *bus);
  222 static void      xpt_run_dev_sendq(struct cam_eb *bus);
  223 static timeout_t xpt_release_devq_timeout;
  224 static void      xpt_release_simq_timeout(void *arg) __unused;
  225 static void      xpt_release_bus(struct cam_eb *bus);
  226 static void      xpt_release_devq_device(struct cam_ed *dev, cam_rl rl,
  227                     u_int count, int run_queue);
  228 static struct cam_et*
  229                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
  230 static void      xpt_release_target(struct cam_et *target);
  231 static struct cam_eb*
  232                  xpt_find_bus(path_id_t path_id);
  233 static struct cam_et*
  234                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
  235 static struct cam_ed*
  236                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
  237 static void      xpt_config(void *arg);
  238 static xpt_devicefunc_t xptpassannouncefunc;
  239 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
  240 static void      xptpoll(struct cam_sim *sim);
  241 static void      camisr(void *);
  242 static void      camisr_runqueue(void *);
  243 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
  244                                     u_int num_patterns, struct cam_eb *bus);
  245 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
  246                                        u_int num_patterns,
  247                                        struct cam_ed *device);
  248 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
  249                                        u_int num_patterns,
  250                                        struct cam_periph *periph);
  251 static xpt_busfunc_t    xptedtbusfunc;
  252 static xpt_targetfunc_t xptedttargetfunc;
  253 static xpt_devicefunc_t xptedtdevicefunc;
  254 static xpt_periphfunc_t xptedtperiphfunc;
  255 static xpt_pdrvfunc_t   xptplistpdrvfunc;
  256 static xpt_periphfunc_t xptplistperiphfunc;
  257 static int              xptedtmatch(struct ccb_dev_match *cdm);
  258 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
  259 static int              xptbustraverse(struct cam_eb *start_bus,
  260                                        xpt_busfunc_t *tr_func, void *arg);
  261 static int              xpttargettraverse(struct cam_eb *bus,
  262                                           struct cam_et *start_target,
  263                                           xpt_targetfunc_t *tr_func, void *arg);
  264 static int              xptdevicetraverse(struct cam_et *target,
  265                                           struct cam_ed *start_device,
  266                                           xpt_devicefunc_t *tr_func, void *arg);
  267 static int              xptperiphtraverse(struct cam_ed *device,
  268                                           struct cam_periph *start_periph,
  269                                           xpt_periphfunc_t *tr_func, void *arg);
  270 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
  271                                         xpt_pdrvfunc_t *tr_func, void *arg);
  272 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
  273                                             struct cam_periph *start_periph,
  274                                             xpt_periphfunc_t *tr_func,
  275                                             void *arg);
  276 static xpt_busfunc_t    xptdefbusfunc;
  277 static xpt_targetfunc_t xptdeftargetfunc;
  278 static xpt_devicefunc_t xptdefdevicefunc;
  279 static xpt_periphfunc_t xptdefperiphfunc;
  280 static void             xpt_finishconfig_task(void *context, int pending);
  281 static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
  282 static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
  283                                             void *arg);
  284 static void             xpt_dev_async_default(u_int32_t async_code,
  285                                               struct cam_eb *bus,
  286                                               struct cam_et *target,
  287                                               struct cam_ed *device,
  288                                               void *async_arg);
  289 static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
  290                                                  struct cam_et *target,
  291                                                  lun_id_t lun_id);
  292 static xpt_devicefunc_t xptsetasyncfunc;
  293 static xpt_busfunc_t    xptsetasyncbusfunc;
  294 static cam_status       xptregister(struct cam_periph *periph,
  295                                     void *arg);
  296 static __inline int periph_is_queued(struct cam_periph *periph);
  297 static __inline int device_is_alloc_queued(struct cam_ed *device);
  298 static __inline int device_is_send_queued(struct cam_ed *device);
  299 
  300 static __inline int
  301 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
  302 {
  303         int retval;
  304 
  305         if ((dev->drvq.entries > 0) &&
  306             (dev->ccbq.devq_openings > 0) &&
  307             (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
  308                 CAMQ_GET_PRIO(&dev->drvq))) == 0)) {
  309                 /*
  310                  * The priority of a device waiting for CCB resources
  311                  * is that of the the highest priority peripheral driver
  312                  * enqueued.
  313                  */
  314                 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
  315                                           &dev->alloc_ccb_entry.pinfo,
  316                                           CAMQ_GET_PRIO(&dev->drvq));
  317         } else {
  318                 retval = 0;
  319         }
  320 
  321         return (retval);
  322 }
  323 
  324 static __inline int
  325 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
  326 {
  327         int     retval;
  328 
  329         if ((dev->ccbq.queue.entries > 0) &&
  330             (dev->ccbq.dev_openings > 0) &&
  331             (cam_ccbq_frozen_top(&dev->ccbq) == 0)) {
  332                 /*
  333                  * The priority of a device waiting for controller
  334                  * resources is that of the the highest priority CCB
  335                  * enqueued.
  336                  */
  337                 retval =
  338                     xpt_schedule_dev(&bus->sim->devq->send_queue,
  339                                      &dev->send_ccb_entry.pinfo,
  340                                      CAMQ_GET_PRIO(&dev->ccbq.queue));
  341         } else {
  342                 retval = 0;
  343         }
  344         return (retval);
  345 }
  346 
  347 static __inline int
  348 periph_is_queued(struct cam_periph *periph)
  349 {
  350         return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
  351 }
  352 
  353 static __inline int
  354 device_is_alloc_queued(struct cam_ed *device)
  355 {
  356         return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  357 }
  358 
  359 static __inline int
  360 device_is_send_queued(struct cam_ed *device)
  361 {
  362         return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  363 }
  364 
  365 static void
  366 xpt_periph_init()
  367 {
  368         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
  369 }
  370 
  371 static void
  372 xptdone(struct cam_periph *periph, union ccb *done_ccb)
  373 {
  374         /* Caller will release the CCB */
  375         wakeup(&done_ccb->ccb_h.cbfcnp);
  376 }
  377 
  378 static int
  379 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
  380 {
  381 
  382         /*
  383          * Only allow read-write access.
  384          */
  385         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
  386                 return(EPERM);
  387 
  388         /*
  389          * We don't allow nonblocking access.
  390          */
  391         if ((flags & O_NONBLOCK) != 0) {
  392                 printf("%s: can't do nonblocking access\n", devtoname(dev));
  393                 return(ENODEV);
  394         }
  395 
  396         /* Mark ourselves open */
  397         mtx_lock(&xsoftc.xpt_lock);
  398         xsoftc.flags |= XPT_FLAG_OPEN;
  399         mtx_unlock(&xsoftc.xpt_lock);
  400 
  401         return(0);
  402 }
  403 
  404 static int
  405 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
  406 {
  407 
  408         /* Mark ourselves closed */
  409         mtx_lock(&xsoftc.xpt_lock);
  410         xsoftc.flags &= ~XPT_FLAG_OPEN;
  411         mtx_unlock(&xsoftc.xpt_lock);
  412 
  413         return(0);
  414 }
  415 
  416 /*
  417  * Don't automatically grab the xpt softc lock here even though this is going
  418  * through the xpt device.  The xpt device is really just a back door for
  419  * accessing other devices and SIMs, so the right thing to do is to grab
  420  * the appropriate SIM lock once the bus/SIM is located.
  421  */
  422 static int
  423 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
  424 {
  425         int error;
  426 
  427         error = 0;
  428 
  429         switch(cmd) {
  430         /*
  431          * For the transport layer CAMIOCOMMAND ioctl, we really only want
  432          * to accept CCB types that don't quite make sense to send through a
  433          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
  434          * in the CAM spec.
  435          */
  436         case CAMIOCOMMAND: {
  437                 union ccb *ccb;
  438                 union ccb *inccb;
  439                 struct cam_eb *bus;
  440 
  441                 inccb = (union ccb *)addr;
  442 
  443                 bus = xpt_find_bus(inccb->ccb_h.path_id);
  444                 if (bus == NULL) {
  445                         error = EINVAL;
  446                         break;
  447                 }
  448 
  449                 switch(inccb->ccb_h.func_code) {
  450                 case XPT_SCAN_BUS:
  451                 case XPT_RESET_BUS:
  452                         if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
  453                          || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
  454                                 error = EINVAL;
  455                                 break;
  456                         }
  457                         /* FALLTHROUGH */
  458                 case XPT_PATH_INQ:
  459                 case XPT_ENG_INQ:
  460                 case XPT_SCAN_LUN:
  461 
  462                         ccb = xpt_alloc_ccb();
  463 
  464                         CAM_SIM_LOCK(bus->sim);
  465 
  466                         /*
  467                          * Create a path using the bus, target, and lun the
  468                          * user passed in.
  469                          */
  470                         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
  471                                             inccb->ccb_h.path_id,
  472                                             inccb->ccb_h.target_id,
  473                                             inccb->ccb_h.target_lun) !=
  474                                             CAM_REQ_CMP){
  475                                 error = EINVAL;
  476                                 CAM_SIM_UNLOCK(bus->sim);
  477                                 xpt_free_ccb(ccb);
  478                                 break;
  479                         }
  480                         /* Ensure all of our fields are correct */
  481                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
  482                                       inccb->ccb_h.pinfo.priority);
  483                         xpt_merge_ccb(ccb, inccb);
  484                         ccb->ccb_h.cbfcnp = xptdone;
  485                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
  486                         bcopy(ccb, inccb, sizeof(union ccb));
  487                         xpt_free_path(ccb->ccb_h.path);
  488                         xpt_free_ccb(ccb);
  489                         CAM_SIM_UNLOCK(bus->sim);
  490                         break;
  491 
  492                 case XPT_DEBUG: {
  493                         union ccb ccb;
  494 
  495                         /*
  496                          * This is an immediate CCB, so it's okay to
  497                          * allocate it on the stack.
  498                          */
  499 
  500                         CAM_SIM_LOCK(bus->sim);
  501 
  502                         /*
  503                          * Create a path using the bus, target, and lun the
  504                          * user passed in.
  505                          */
  506                         if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
  507                                             inccb->ccb_h.path_id,
  508                                             inccb->ccb_h.target_id,
  509                                             inccb->ccb_h.target_lun) !=
  510                                             CAM_REQ_CMP){
  511                                 error = EINVAL;
  512                                 CAM_SIM_UNLOCK(bus->sim);
  513                                 break;
  514                         }
  515                         /* Ensure all of our fields are correct */
  516                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
  517                                       inccb->ccb_h.pinfo.priority);
  518                         xpt_merge_ccb(&ccb, inccb);
  519                         ccb.ccb_h.cbfcnp = xptdone;
  520                         xpt_action(&ccb);
  521                         CAM_SIM_UNLOCK(bus->sim);
  522                         bcopy(&ccb, inccb, sizeof(union ccb));
  523                         xpt_free_path(ccb.ccb_h.path);
  524                         break;
  525 
  526                 }
  527                 case XPT_DEV_MATCH: {
  528                         struct cam_periph_map_info mapinfo;
  529                         struct cam_path *old_path;
  530 
  531                         /*
  532                          * We can't deal with physical addresses for this
  533                          * type of transaction.
  534                          */
  535                         if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
  536                                 error = EINVAL;
  537                                 break;
  538                         }
  539 
  540                         /*
  541                          * Save this in case the caller had it set to
  542                          * something in particular.
  543                          */
  544                         old_path = inccb->ccb_h.path;
  545 
  546                         /*
  547                          * We really don't need a path for the matching
  548                          * code.  The path is needed because of the
  549                          * debugging statements in xpt_action().  They
  550                          * assume that the CCB has a valid path.
  551                          */
  552                         inccb->ccb_h.path = xpt_periph->path;
  553 
  554                         bzero(&mapinfo, sizeof(mapinfo));
  555 
  556                         /*
  557                          * Map the pattern and match buffers into kernel
  558                          * virtual address space.
  559                          */
  560                         error = cam_periph_mapmem(inccb, &mapinfo);
  561 
  562                         if (error) {
  563                                 inccb->ccb_h.path = old_path;
  564                                 break;
  565                         }
  566 
  567                         /*
  568                          * This is an immediate CCB, we can send it on directly.
  569                          */
  570                         xpt_action(inccb);
  571 
  572                         /*
  573                          * Map the buffers back into user space.
  574                          */
  575                         cam_periph_unmapmem(inccb, &mapinfo);
  576 
  577                         inccb->ccb_h.path = old_path;
  578 
  579                         error = 0;
  580                         break;
  581                 }
  582                 default:
  583                         error = ENOTSUP;
  584                         break;
  585                 }
  586                 xpt_release_bus(bus);
  587                 break;
  588         }
  589         /*
  590          * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
  591          * with the periphal driver name and unit name filled in.  The other
  592          * fields don't really matter as input.  The passthrough driver name
  593          * ("pass"), and unit number are passed back in the ccb.  The current
  594          * device generation number, and the index into the device peripheral
  595          * driver list, and the status are also passed back.  Note that
  596          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
  597          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
  598          * (or rather should be) impossible for the device peripheral driver
  599          * list to change since we look at the whole thing in one pass, and
  600          * we do it with lock protection.
  601          *
  602          */
  603         case CAMGETPASSTHRU: {
  604                 union ccb *ccb;
  605                 struct cam_periph *periph;
  606                 struct periph_driver **p_drv;
  607                 char   *name;
  608                 u_int unit;
  609                 u_int cur_generation;
  610                 int base_periph_found;
  611                 int splbreaknum;
  612 
  613                 ccb = (union ccb *)addr;
  614                 unit = ccb->cgdl.unit_number;
  615                 name = ccb->cgdl.periph_name;
  616                 /*
  617                  * Every 100 devices, we want to drop our lock protection to
  618                  * give the software interrupt handler a chance to run.
  619                  * Most systems won't run into this check, but this should
  620                  * avoid starvation in the software interrupt handler in
  621                  * large systems.
  622                  */
  623                 splbreaknum = 100;
  624 
  625                 ccb = (union ccb *)addr;
  626 
  627                 base_periph_found = 0;
  628 
  629                 /*
  630                  * Sanity check -- make sure we don't get a null peripheral
  631                  * driver name.
  632                  */
  633                 if (*ccb->cgdl.periph_name == '\0') {
  634                         error = EINVAL;
  635                         break;
  636                 }
  637 
  638                 /* Keep the list from changing while we traverse it */
  639                 mtx_lock(&xsoftc.xpt_topo_lock);
  640 ptstartover:
  641                 cur_generation = xsoftc.xpt_generation;
  642 
  643                 /* first find our driver in the list of drivers */
  644                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
  645                         if (strcmp((*p_drv)->driver_name, name) == 0)
  646                                 break;
  647 
  648                 if (*p_drv == NULL) {
  649                         mtx_unlock(&xsoftc.xpt_topo_lock);
  650                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  651                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  652                         *ccb->cgdl.periph_name = '\0';
  653                         ccb->cgdl.unit_number = 0;
  654                         error = ENOENT;
  655                         break;
  656                 }
  657 
  658                 /*
  659                  * Run through every peripheral instance of this driver
  660                  * and check to see whether it matches the unit passed
  661                  * in by the user.  If it does, get out of the loops and
  662                  * find the passthrough driver associated with that
  663                  * peripheral driver.
  664                  */
  665                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
  666                      periph = TAILQ_NEXT(periph, unit_links)) {
  667 
  668                         if (periph->unit_number == unit) {
  669                                 break;
  670                         } else if (--splbreaknum == 0) {
  671                                 mtx_unlock(&xsoftc.xpt_topo_lock);
  672                                 mtx_lock(&xsoftc.xpt_topo_lock);
  673                                 splbreaknum = 100;
  674                                 if (cur_generation != xsoftc.xpt_generation)
  675                                        goto ptstartover;
  676                         }
  677                 }
  678                 /*
  679                  * If we found the peripheral driver that the user passed
  680                  * in, go through all of the peripheral drivers for that
  681                  * particular device and look for a passthrough driver.
  682                  */
  683                 if (periph != NULL) {
  684                         struct cam_ed *device;
  685                         int i;
  686 
  687                         base_periph_found = 1;
  688                         device = periph->path->device;
  689                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
  690                              periph != NULL;
  691                              periph = SLIST_NEXT(periph, periph_links), i++) {
  692                                 /*
  693                                  * Check to see whether we have a
  694                                  * passthrough device or not.
  695                                  */
  696                                 if (strcmp(periph->periph_name, "pass") == 0) {
  697                                         /*
  698                                          * Fill in the getdevlist fields.
  699                                          */
  700                                         strcpy(ccb->cgdl.periph_name,
  701                                                periph->periph_name);
  702                                         ccb->cgdl.unit_number =
  703                                                 periph->unit_number;
  704                                         if (SLIST_NEXT(periph, periph_links))
  705                                                 ccb->cgdl.status =
  706                                                         CAM_GDEVLIST_MORE_DEVS;
  707                                         else
  708                                                 ccb->cgdl.status =
  709                                                        CAM_GDEVLIST_LAST_DEVICE;
  710                                         ccb->cgdl.generation =
  711                                                 device->generation;
  712                                         ccb->cgdl.index = i;
  713                                         /*
  714                                          * Fill in some CCB header fields
  715                                          * that the user may want.
  716                                          */
  717                                         ccb->ccb_h.path_id =
  718                                                 periph->path->bus->path_id;
  719                                         ccb->ccb_h.target_id =
  720                                                 periph->path->target->target_id;
  721                                         ccb->ccb_h.target_lun =
  722                                                 periph->path->device->lun_id;
  723                                         ccb->ccb_h.status = CAM_REQ_CMP;
  724                                         break;
  725                                 }
  726                         }
  727                 }
  728 
  729                 /*
  730                  * If the periph is null here, one of two things has
  731                  * happened.  The first possibility is that we couldn't
  732                  * find the unit number of the particular peripheral driver
  733                  * that the user is asking about.  e.g. the user asks for
  734                  * the passthrough driver for "da11".  We find the list of
  735                  * "da" peripherals all right, but there is no unit 11.
  736                  * The other possibility is that we went through the list
  737                  * of peripheral drivers attached to the device structure,
  738                  * but didn't find one with the name "pass".  Either way,
  739                  * we return ENOENT, since we couldn't find something.
  740                  */
  741                 if (periph == NULL) {
  742                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  743                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  744                         *ccb->cgdl.periph_name = '\0';
  745                         ccb->cgdl.unit_number = 0;
  746                         error = ENOENT;
  747                         /*
  748                          * It is unfortunate that this is even necessary,
  749                          * but there are many, many clueless users out there.
  750                          * If this is true, the user is looking for the
  751                          * passthrough driver, but doesn't have one in his
  752                          * kernel.
  753                          */
  754                         if (base_periph_found == 1) {
  755                                 printf("xptioctl: pass driver is not in the "
  756                                        "kernel\n");
  757                                 printf("xptioctl: put \"device pass\" in "
  758                                        "your kernel config file\n");
  759                         }
  760                 }
  761                 mtx_unlock(&xsoftc.xpt_topo_lock);
  762                 break;
  763                 }
  764         default:
  765                 error = ENOTTY;
  766                 break;
  767         }
  768 
  769         return(error);
  770 }
  771 
  772 static int
  773 cam_module_event_handler(module_t mod, int what, void *arg)
  774 {
  775         int error;
  776 
  777         switch (what) {
  778         case MOD_LOAD:
  779                 if ((error = xpt_init(NULL)) != 0)
  780                         return (error);
  781                 break;
  782         case MOD_UNLOAD:
  783                 return EBUSY;
  784         default:
  785                 return EOPNOTSUPP;
  786         }
  787 
  788         return 0;
  789 }
  790 
  791 static void
  792 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
  793 {
  794 
  795         if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
  796                 xpt_free_path(done_ccb->ccb_h.path);
  797                 xpt_free_ccb(done_ccb);
  798         } else {
  799                 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
  800                 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
  801         }
  802         xpt_release_boot();
  803 }
  804 
  805 /* thread to handle bus rescans */
  806 static void
  807 xpt_scanner_thread(void *dummy)
  808 {
  809         union ccb       *ccb;
  810         struct cam_sim  *sim;
  811 
  812         xpt_lock_buses();
  813         for (;;) {
  814                 if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
  815                         msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
  816                                "ccb_scanq", 0);
  817                 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
  818                         TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  819                         xpt_unlock_buses();
  820 
  821                         sim = ccb->ccb_h.path->bus->sim;
  822                         CAM_SIM_LOCK(sim);
  823                         xpt_action(ccb);
  824                         CAM_SIM_UNLOCK(sim);
  825 
  826                         xpt_lock_buses();
  827                 }
  828         }
  829 }
  830 
  831 void
  832 xpt_rescan(union ccb *ccb)
  833 {
  834         struct ccb_hdr *hdr;
  835 
  836         /* Prepare request */
  837         if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD ||
  838             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
  839                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
  840         else
  841                 ccb->ccb_h.func_code = XPT_SCAN_LUN;
  842         ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
  843         ccb->ccb_h.cbfcnp = xpt_rescan_done;
  844         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
  845         /* Don't make duplicate entries for the same paths. */
  846         xpt_lock_buses();
  847         if (ccb->ccb_h.ppriv_ptr1 == NULL) {
  848                 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
  849                         if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
  850                                 wakeup(&xsoftc.ccb_scanq);
  851                                 xpt_unlock_buses();
  852                                 xpt_print(ccb->ccb_h.path, "rescan already queued\n");
  853                                 xpt_free_path(ccb->ccb_h.path);
  854                                 xpt_free_ccb(ccb);
  855                                 return;
  856                         }
  857                 }
  858         }
  859         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  860         xsoftc.buses_to_config++;
  861         wakeup(&xsoftc.ccb_scanq);
  862         xpt_unlock_buses();
  863 }
  864 
  865 /* Functions accessed by the peripheral drivers */
  866 static int
  867 xpt_init(void *dummy)
  868 {
  869         struct cam_sim *xpt_sim;
  870         struct cam_path *path;
  871         struct cam_devq *devq;
  872         cam_status status;
  873 
  874         TAILQ_INIT(&xsoftc.xpt_busses);
  875         TAILQ_INIT(&cam_simq);
  876         TAILQ_INIT(&xsoftc.ccb_scanq);
  877         STAILQ_INIT(&xsoftc.highpowerq);
  878         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
  879 
  880         mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
  881         mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
  882         mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
  883 
  884         /*
  885          * The xpt layer is, itself, the equivelent of a SIM.
  886          * Allow 16 ccbs in the ccb pool for it.  This should
  887          * give decent parallelism when we probe busses and
  888          * perform other XPT functions.
  889          */
  890         devq = cam_simq_alloc(16);
  891         xpt_sim = cam_sim_alloc(xptaction,
  892                                 xptpoll,
  893                                 "xpt",
  894                                 /*softc*/NULL,
  895                                 /*unit*/0,
  896                                 /*mtx*/&xsoftc.xpt_lock,
  897                                 /*max_dev_transactions*/0,
  898                                 /*max_tagged_dev_transactions*/0,
  899                                 devq);
  900         if (xpt_sim == NULL)
  901                 return (ENOMEM);
  902 
  903         mtx_lock(&xsoftc.xpt_lock);
  904         if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
  905                 mtx_unlock(&xsoftc.xpt_lock);
  906                 printf("xpt_init: xpt_bus_register failed with status %#x,"
  907                        " failing attach\n", status);
  908                 return (EINVAL);
  909         }
  910 
  911         /*
  912          * Looking at the XPT from the SIM layer, the XPT is
  913          * the equivelent of a peripheral driver.  Allocate
  914          * a peripheral driver entry for us.
  915          */
  916         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
  917                                       CAM_TARGET_WILDCARD,
  918                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
  919                 mtx_unlock(&xsoftc.xpt_lock);
  920                 printf("xpt_init: xpt_create_path failed with status %#x,"
  921                        " failing attach\n", status);
  922                 return (EINVAL);
  923         }
  924 
  925         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
  926                          path, NULL, 0, xpt_sim);
  927         xpt_free_path(path);
  928         mtx_unlock(&xsoftc.xpt_lock);
  929         /* Install our software interrupt handlers */
  930         swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
  931         /*
  932          * Register a callback for when interrupts are enabled.
  933          */
  934         xsoftc.xpt_config_hook =
  935             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
  936                                               M_CAMXPT, M_NOWAIT | M_ZERO);
  937         if (xsoftc.xpt_config_hook == NULL) {
  938                 printf("xpt_init: Cannot malloc config hook "
  939                        "- failing attach\n");
  940                 return (ENOMEM);
  941         }
  942         xsoftc.xpt_config_hook->ich_func = xpt_config;
  943         if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
  944                 free (xsoftc.xpt_config_hook, M_CAMXPT);
  945                 printf("xpt_init: config_intrhook_establish failed "
  946                        "- failing attach\n");
  947         }
  948 
  949         return (0);
  950 }
  951 
  952 static cam_status
  953 xptregister(struct cam_periph *periph, void *arg)
  954 {
  955         struct cam_sim *xpt_sim;
  956 
  957         if (periph == NULL) {
  958                 printf("xptregister: periph was NULL!!\n");
  959                 return(CAM_REQ_CMP_ERR);
  960         }
  961 
  962         xpt_sim = (struct cam_sim *)arg;
  963         xpt_sim->softc = periph;
  964         xpt_periph = periph;
  965         periph->softc = NULL;
  966 
  967         return(CAM_REQ_CMP);
  968 }
  969 
  970 int32_t
  971 xpt_add_periph(struct cam_periph *periph)
  972 {
  973         struct cam_ed *device;
  974         int32_t  status;
  975         struct periph_list *periph_head;
  976 
  977         mtx_assert(periph->sim->mtx, MA_OWNED);
  978 
  979         device = periph->path->device;
  980 
  981         periph_head = &device->periphs;
  982 
  983         status = CAM_REQ_CMP;
  984 
  985         if (device != NULL) {
  986                 /*
  987                  * Make room for this peripheral
  988                  * so it will fit in the queue
  989                  * when it's scheduled to run
  990                  */
  991                 status = camq_resize(&device->drvq,
  992                                      device->drvq.array_size + 1);
  993 
  994                 device->generation++;
  995 
  996                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
  997         }
  998 
  999         mtx_lock(&xsoftc.xpt_topo_lock);
 1000         xsoftc.xpt_generation++;
 1001         mtx_unlock(&xsoftc.xpt_topo_lock);
 1002 
 1003         return (status);
 1004 }
 1005 
 1006 void
 1007 xpt_remove_periph(struct cam_periph *periph)
 1008 {
 1009         struct cam_ed *device;
 1010 
 1011         mtx_assert(periph->sim->mtx, MA_OWNED);
 1012 
 1013         device = periph->path->device;
 1014 
 1015         if (device != NULL) {
 1016                 struct periph_list *periph_head;
 1017 
 1018                 periph_head = &device->periphs;
 1019 
 1020                 /* Release the slot for this peripheral */
 1021                 camq_resize(&device->drvq, device->drvq.array_size - 1);
 1022 
 1023                 device->generation++;
 1024 
 1025                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
 1026         }
 1027 
 1028         mtx_lock(&xsoftc.xpt_topo_lock);
 1029         xsoftc.xpt_generation++;
 1030         mtx_unlock(&xsoftc.xpt_topo_lock);
 1031 }
 1032 
 1033 
 1034 void
 1035 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1036 {
 1037         struct  cam_path *path = periph->path;
 1038 
 1039         mtx_assert(periph->sim->mtx, MA_OWNED);
 1040 
 1041         printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n",
 1042                periph->periph_name, periph->unit_number,
 1043                path->bus->sim->sim_name,
 1044                path->bus->sim->unit_number,
 1045                path->bus->sim->bus_id,
 1046                path->bus->path_id,
 1047                path->target->target_id,
 1048                path->device->lun_id);
 1049         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1050         if (path->device->protocol == PROTO_SCSI)
 1051                 scsi_print_inquiry(&path->device->inq_data);
 1052         else if (path->device->protocol == PROTO_ATA ||
 1053             path->device->protocol == PROTO_SATAPM)
 1054                 ata_print_ident(&path->device->ident_data);
 1055         else
 1056                 printf("Unknown protocol device\n");
 1057         if (bootverbose && path->device->serial_num_len > 0) {
 1058                 /* Don't wrap the screen  - print only the first 60 chars */
 1059                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1060                        periph->unit_number, path->device->serial_num);
 1061         }
 1062         /* Announce transport details. */
 1063         (*(path->bus->xport->announce))(periph);
 1064         /* Announce command queueing. */
 1065         if (path->device->inq_flags & SID_CmdQue
 1066          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1067                 printf("%s%d: Command Queueing enabled\n",
 1068                        periph->periph_name, periph->unit_number);
 1069         }
 1070         /* Announce caller's details if they've passed in. */
 1071         if (announce_string != NULL)
 1072                 printf("%s%d: %s\n", periph->periph_name,
 1073                        periph->unit_number, announce_string);
 1074 }
 1075 
 1076 static dev_match_ret
 1077 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1078             struct cam_eb *bus)
 1079 {
 1080         dev_match_ret retval;
 1081         int i;
 1082 
 1083         retval = DM_RET_NONE;
 1084 
 1085         /*
 1086          * If we aren't given something to match against, that's an error.
 1087          */
 1088         if (bus == NULL)
 1089                 return(DM_RET_ERROR);
 1090 
 1091         /*
 1092          * If there are no match entries, then this bus matches no
 1093          * matter what.
 1094          */
 1095         if ((patterns == NULL) || (num_patterns == 0))
 1096                 return(DM_RET_DESCEND | DM_RET_COPY);
 1097 
 1098         for (i = 0; i < num_patterns; i++) {
 1099                 struct bus_match_pattern *cur_pattern;
 1100 
 1101                 /*
 1102                  * If the pattern in question isn't for a bus node, we
 1103                  * aren't interested.  However, we do indicate to the
 1104                  * calling routine that we should continue descending the
 1105                  * tree, since the user wants to match against lower-level
 1106                  * EDT elements.
 1107                  */
 1108                 if (patterns[i].type != DEV_MATCH_BUS) {
 1109                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1110                                 retval |= DM_RET_DESCEND;
 1111                         continue;
 1112                 }
 1113 
 1114                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1115 
 1116                 /*
 1117                  * If they want to match any bus node, we give them any
 1118                  * device node.
 1119                  */
 1120                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1121                         /* set the copy flag */
 1122                         retval |= DM_RET_COPY;
 1123 
 1124                         /*
 1125                          * If we've already decided on an action, go ahead
 1126                          * and return.
 1127                          */
 1128                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1129                                 return(retval);
 1130                 }
 1131 
 1132                 /*
 1133                  * Not sure why someone would do this...
 1134                  */
 1135                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1136                         continue;
 1137 
 1138                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1139                  && (cur_pattern->path_id != bus->path_id))
 1140                         continue;
 1141 
 1142                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1143                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1144                         continue;
 1145 
 1146                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1147                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1148                         continue;
 1149 
 1150                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1151                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1152                              DEV_IDLEN) != 0))
 1153                         continue;
 1154 
 1155                 /*
 1156                  * If we get to this point, the user definitely wants
 1157                  * information on this bus.  So tell the caller to copy the
 1158                  * data out.
 1159                  */
 1160                 retval |= DM_RET_COPY;
 1161 
 1162                 /*
 1163                  * If the return action has been set to descend, then we
 1164                  * know that we've already seen a non-bus matching
 1165                  * expression, therefore we need to further descend the tree.
 1166                  * This won't change by continuing around the loop, so we
 1167                  * go ahead and return.  If we haven't seen a non-bus
 1168                  * matching expression, we keep going around the loop until
 1169                  * we exhaust the matching expressions.  We'll set the stop
 1170                  * flag once we fall out of the loop.
 1171                  */
 1172                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1173                         return(retval);
 1174         }
 1175 
 1176         /*
 1177          * If the return action hasn't been set to descend yet, that means
 1178          * we haven't seen anything other than bus matching patterns.  So
 1179          * tell the caller to stop descending the tree -- the user doesn't
 1180          * want to match against lower level tree elements.
 1181          */
 1182         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1183                 retval |= DM_RET_STOP;
 1184 
 1185         return(retval);
 1186 }
 1187 
 1188 static dev_match_ret
 1189 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1190                struct cam_ed *device)
 1191 {
 1192         dev_match_ret retval;
 1193         int i;
 1194 
 1195         retval = DM_RET_NONE;
 1196 
 1197         /*
 1198          * If we aren't given something to match against, that's an error.
 1199          */
 1200         if (device == NULL)
 1201                 return(DM_RET_ERROR);
 1202 
 1203         /*
 1204          * If there are no match entries, then this device matches no
 1205          * matter what.
 1206          */
 1207         if ((patterns == NULL) || (num_patterns == 0))
 1208                 return(DM_RET_DESCEND | DM_RET_COPY);
 1209 
 1210         for (i = 0; i < num_patterns; i++) {
 1211                 struct device_match_pattern *cur_pattern;
 1212 
 1213                 /*
 1214                  * If the pattern in question isn't for a device node, we
 1215                  * aren't interested.
 1216                  */
 1217                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1218                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1219                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1220                                 retval |= DM_RET_DESCEND;
 1221                         continue;
 1222                 }
 1223 
 1224                 cur_pattern = &patterns[i].pattern.device_pattern;
 1225 
 1226                 /*
 1227                  * If they want to match any device node, we give them any
 1228                  * device node.
 1229                  */
 1230                 if (cur_pattern->flags == DEV_MATCH_ANY) {
 1231                         /* set the copy flag */
 1232                         retval |= DM_RET_COPY;
 1233 
 1234 
 1235                         /*
 1236                          * If we've already decided on an action, go ahead
 1237                          * and return.
 1238                          */
 1239                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1240                                 return(retval);
 1241                 }
 1242 
 1243                 /*
 1244                  * Not sure why someone would do this...
 1245                  */
 1246                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1247                         continue;
 1248 
 1249                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1250                  && (cur_pattern->path_id != device->target->bus->path_id))
 1251                         continue;
 1252 
 1253                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1254                  && (cur_pattern->target_id != device->target->target_id))
 1255                         continue;
 1256 
 1257                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1258                  && (cur_pattern->target_lun != device->lun_id))
 1259                         continue;
 1260 
 1261                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1262                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1263                                     (caddr_t)&cur_pattern->inq_pat,
 1264                                     1, sizeof(cur_pattern->inq_pat),
 1265                                     scsi_static_inquiry_match) == NULL))
 1266                         continue;
 1267 
 1268                 /*
 1269                  * If we get to this point, the user definitely wants
 1270                  * information on this device.  So tell the caller to copy
 1271                  * the data out.
 1272                  */
 1273                 retval |= DM_RET_COPY;
 1274 
 1275                 /*
 1276                  * If the return action has been set to descend, then we
 1277                  * know that we've already seen a peripheral matching
 1278                  * expression, therefore we need to further descend the tree.
 1279                  * This won't change by continuing around the loop, so we
 1280                  * go ahead and return.  If we haven't seen a peripheral
 1281                  * matching expression, we keep going around the loop until
 1282                  * we exhaust the matching expressions.  We'll set the stop
 1283                  * flag once we fall out of the loop.
 1284                  */
 1285                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1286                         return(retval);
 1287         }
 1288 
 1289         /*
 1290          * If the return action hasn't been set to descend yet, that means
 1291          * we haven't seen any peripheral matching patterns.  So tell the
 1292          * caller to stop descending the tree -- the user doesn't want to
 1293          * match against lower level tree elements.
 1294          */
 1295         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1296                 retval |= DM_RET_STOP;
 1297 
 1298         return(retval);
 1299 }
 1300 
 1301 /*
 1302  * Match a single peripheral against any number of match patterns.
 1303  */
 1304 static dev_match_ret
 1305 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1306                struct cam_periph *periph)
 1307 {
 1308         dev_match_ret retval;
 1309         int i;
 1310 
 1311         /*
 1312          * If we aren't given something to match against, that's an error.
 1313          */
 1314         if (periph == NULL)
 1315                 return(DM_RET_ERROR);
 1316 
 1317         /*
 1318          * If there are no match entries, then this peripheral matches no
 1319          * matter what.
 1320          */
 1321         if ((patterns == NULL) || (num_patterns == 0))
 1322                 return(DM_RET_STOP | DM_RET_COPY);
 1323 
 1324         /*
 1325          * There aren't any nodes below a peripheral node, so there's no
 1326          * reason to descend the tree any further.
 1327          */
 1328         retval = DM_RET_STOP;
 1329 
 1330         for (i = 0; i < num_patterns; i++) {
 1331                 struct periph_match_pattern *cur_pattern;
 1332 
 1333                 /*
 1334                  * If the pattern in question isn't for a peripheral, we
 1335                  * aren't interested.
 1336                  */
 1337                 if (patterns[i].type != DEV_MATCH_PERIPH)
 1338                         continue;
 1339 
 1340                 cur_pattern = &patterns[i].pattern.periph_pattern;
 1341 
 1342                 /*
 1343                  * If they want to match on anything, then we will do so.
 1344                  */
 1345                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 1346                         /* set the copy flag */
 1347                         retval |= DM_RET_COPY;
 1348 
 1349                         /*
 1350                          * We've already set the return action to stop,
 1351                          * since there are no nodes below peripherals in
 1352                          * the tree.
 1353                          */
 1354                         return(retval);
 1355                 }
 1356 
 1357                 /*
 1358                  * Not sure why someone would do this...
 1359                  */
 1360                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 1361                         continue;
 1362 
 1363                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 1364                  && (cur_pattern->path_id != periph->path->bus->path_id))
 1365                         continue;
 1366 
 1367                 /*
 1368                  * For the target and lun id's, we have to make sure the
 1369                  * target and lun pointers aren't NULL.  The xpt peripheral
 1370                  * has a wildcard target and device.
 1371                  */
 1372                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 1373                  && ((periph->path->target == NULL)
 1374                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 1375                         continue;
 1376 
 1377                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 1378                  && ((periph->path->device == NULL)
 1379                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 1380                         continue;
 1381 
 1382                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 1383                  && (cur_pattern->unit_number != periph->unit_number))
 1384                         continue;
 1385 
 1386                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 1387                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 1388                              DEV_IDLEN) != 0))
 1389                         continue;
 1390 
 1391                 /*
 1392                  * If we get to this point, the user definitely wants
 1393                  * information on this peripheral.  So tell the caller to
 1394                  * copy the data out.
 1395                  */
 1396                 retval |= DM_RET_COPY;
 1397 
 1398                 /*
 1399                  * The return action has already been set to stop, since
 1400                  * peripherals don't have any nodes below them in the EDT.
 1401                  */
 1402                 return(retval);
 1403         }
 1404 
 1405         /*
 1406          * If we get to this point, the peripheral that was passed in
 1407          * doesn't match any of the patterns.
 1408          */
 1409         return(retval);
 1410 }
 1411 
 1412 static int
 1413 xptedtbusfunc(struct cam_eb *bus, void *arg)
 1414 {
 1415         struct ccb_dev_match *cdm;
 1416         dev_match_ret retval;
 1417 
 1418         cdm = (struct ccb_dev_match *)arg;
 1419 
 1420         /*
 1421          * If our position is for something deeper in the tree, that means
 1422          * that we've already seen this node.  So, we keep going down.
 1423          */
 1424         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1425          && (cdm->pos.cookie.bus == bus)
 1426          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1427          && (cdm->pos.cookie.target != NULL))
 1428                 retval = DM_RET_DESCEND;
 1429         else
 1430                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 1431 
 1432         /*
 1433          * If we got an error, bail out of the search.
 1434          */
 1435         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1436                 cdm->status = CAM_DEV_MATCH_ERROR;
 1437                 return(0);
 1438         }
 1439 
 1440         /*
 1441          * If the copy flag is set, copy this bus out.
 1442          */
 1443         if (retval & DM_RET_COPY) {
 1444                 int spaceleft, j;
 1445 
 1446                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1447                         sizeof(struct dev_match_result));
 1448 
 1449                 /*
 1450                  * If we don't have enough space to put in another
 1451                  * match result, save our position and tell the
 1452                  * user there are more devices to check.
 1453                  */
 1454                 if (spaceleft < sizeof(struct dev_match_result)) {
 1455                         bzero(&cdm->pos, sizeof(cdm->pos));
 1456                         cdm->pos.position_type =
 1457                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 1458 
 1459                         cdm->pos.cookie.bus = bus;
 1460                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1461                                 xsoftc.bus_generation;
 1462                         cdm->status = CAM_DEV_MATCH_MORE;
 1463                         return(0);
 1464                 }
 1465                 j = cdm->num_matches;
 1466                 cdm->num_matches++;
 1467                 cdm->matches[j].type = DEV_MATCH_BUS;
 1468                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 1469                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 1470                 cdm->matches[j].result.bus_result.unit_number =
 1471                         bus->sim->unit_number;
 1472                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 1473                         bus->sim->sim_name, DEV_IDLEN);
 1474         }
 1475 
 1476         /*
 1477          * If the user is only interested in busses, there's no
 1478          * reason to descend to the next level in the tree.
 1479          */
 1480         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1481                 return(1);
 1482 
 1483         /*
 1484          * If there is a target generation recorded, check it to
 1485          * make sure the target list hasn't changed.
 1486          */
 1487         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1488          && (bus == cdm->pos.cookie.bus)
 1489          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1490          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 1491          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 1492              bus->generation)) {
 1493                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1494                 return(0);
 1495         }
 1496 
 1497         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1498          && (cdm->pos.cookie.bus == bus)
 1499          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1500          && (cdm->pos.cookie.target != NULL))
 1501                 return(xpttargettraverse(bus,
 1502                                         (struct cam_et *)cdm->pos.cookie.target,
 1503                                          xptedttargetfunc, arg));
 1504         else
 1505                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 1506 }
 1507 
 1508 static int
 1509 xptedttargetfunc(struct cam_et *target, void *arg)
 1510 {
 1511         struct ccb_dev_match *cdm;
 1512 
 1513         cdm = (struct ccb_dev_match *)arg;
 1514 
 1515         /*
 1516          * If there is a device list generation recorded, check it to
 1517          * make sure the device list hasn't changed.
 1518          */
 1519         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1520          && (cdm->pos.cookie.bus == target->bus)
 1521          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1522          && (cdm->pos.cookie.target == target)
 1523          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1524          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 1525          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 1526              target->generation)) {
 1527                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1528                 return(0);
 1529         }
 1530 
 1531         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1532          && (cdm->pos.cookie.bus == target->bus)
 1533          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1534          && (cdm->pos.cookie.target == target)
 1535          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1536          && (cdm->pos.cookie.device != NULL))
 1537                 return(xptdevicetraverse(target,
 1538                                         (struct cam_ed *)cdm->pos.cookie.device,
 1539                                          xptedtdevicefunc, arg));
 1540         else
 1541                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 1542 }
 1543 
 1544 static int
 1545 xptedtdevicefunc(struct cam_ed *device, void *arg)
 1546 {
 1547 
 1548         struct ccb_dev_match *cdm;
 1549         dev_match_ret retval;
 1550 
 1551         cdm = (struct ccb_dev_match *)arg;
 1552 
 1553         /*
 1554          * If our position is for something deeper in the tree, that means
 1555          * that we've already seen this node.  So, we keep going down.
 1556          */
 1557         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1558          && (cdm->pos.cookie.device == device)
 1559          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1560          && (cdm->pos.cookie.periph != NULL))
 1561                 retval = DM_RET_DESCEND;
 1562         else
 1563                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 1564                                         device);
 1565 
 1566         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1567                 cdm->status = CAM_DEV_MATCH_ERROR;
 1568                 return(0);
 1569         }
 1570 
 1571         /*
 1572          * If the copy flag is set, copy this device out.
 1573          */
 1574         if (retval & DM_RET_COPY) {
 1575                 int spaceleft, j;
 1576 
 1577                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1578                         sizeof(struct dev_match_result));
 1579 
 1580                 /*
 1581                  * If we don't have enough space to put in another
 1582                  * match result, save our position and tell the
 1583                  * user there are more devices to check.
 1584                  */
 1585                 if (spaceleft < sizeof(struct dev_match_result)) {
 1586                         bzero(&cdm->pos, sizeof(cdm->pos));
 1587                         cdm->pos.position_type =
 1588                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1589                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 1590 
 1591                         cdm->pos.cookie.bus = device->target->bus;
 1592                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1593                                 xsoftc.bus_generation;
 1594                         cdm->pos.cookie.target = device->target;
 1595                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1596                                 device->target->bus->generation;
 1597                         cdm->pos.cookie.device = device;
 1598                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1599                                 device->target->generation;
 1600                         cdm->status = CAM_DEV_MATCH_MORE;
 1601                         return(0);
 1602                 }
 1603                 j = cdm->num_matches;
 1604                 cdm->num_matches++;
 1605                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 1606                 cdm->matches[j].result.device_result.path_id =
 1607                         device->target->bus->path_id;
 1608                 cdm->matches[j].result.device_result.target_id =
 1609                         device->target->target_id;
 1610                 cdm->matches[j].result.device_result.target_lun =
 1611                         device->lun_id;
 1612                 cdm->matches[j].result.device_result.protocol =
 1613                         device->protocol;
 1614                 bcopy(&device->inq_data,
 1615                       &cdm->matches[j].result.device_result.inq_data,
 1616                       sizeof(struct scsi_inquiry_data));
 1617                 bcopy(&device->ident_data,
 1618                       &cdm->matches[j].result.device_result.ident_data,
 1619                       sizeof(struct ata_params));
 1620 
 1621                 /* Let the user know whether this device is unconfigured */
 1622                 if (device->flags & CAM_DEV_UNCONFIGURED)
 1623                         cdm->matches[j].result.device_result.flags =
 1624                                 DEV_RESULT_UNCONFIGURED;
 1625                 else
 1626                         cdm->matches[j].result.device_result.flags =
 1627                                 DEV_RESULT_NOFLAG;
 1628         }
 1629 
 1630         /*
 1631          * If the user isn't interested in peripherals, don't descend
 1632          * the tree any further.
 1633          */
 1634         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1635                 return(1);
 1636 
 1637         /*
 1638          * If there is a peripheral list generation recorded, make sure
 1639          * it hasn't changed.
 1640          */
 1641         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1642          && (device->target->bus == cdm->pos.cookie.bus)
 1643          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1644          && (device->target == cdm->pos.cookie.target)
 1645          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1646          && (device == cdm->pos.cookie.device)
 1647          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1648          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 1649          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1650              device->generation)){
 1651                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1652                 return(0);
 1653         }
 1654 
 1655         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1656          && (cdm->pos.cookie.bus == device->target->bus)
 1657          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1658          && (cdm->pos.cookie.target == device->target)
 1659          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1660          && (cdm->pos.cookie.device == device)
 1661          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1662          && (cdm->pos.cookie.periph != NULL))
 1663                 return(xptperiphtraverse(device,
 1664                                 (struct cam_periph *)cdm->pos.cookie.periph,
 1665                                 xptedtperiphfunc, arg));
 1666         else
 1667                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 1668 }
 1669 
 1670 static int
 1671 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 1672 {
 1673         struct ccb_dev_match *cdm;
 1674         dev_match_ret retval;
 1675 
 1676         cdm = (struct ccb_dev_match *)arg;
 1677 
 1678         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1679 
 1680         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1681                 cdm->status = CAM_DEV_MATCH_ERROR;
 1682                 return(0);
 1683         }
 1684 
 1685         /*
 1686          * If the copy flag is set, copy this peripheral out.
 1687          */
 1688         if (retval & DM_RET_COPY) {
 1689                 int spaceleft, j;
 1690 
 1691                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1692                         sizeof(struct dev_match_result));
 1693 
 1694                 /*
 1695                  * If we don't have enough space to put in another
 1696                  * match result, save our position and tell the
 1697                  * user there are more devices to check.
 1698                  */
 1699                 if (spaceleft < sizeof(struct dev_match_result)) {
 1700                         bzero(&cdm->pos, sizeof(cdm->pos));
 1701                         cdm->pos.position_type =
 1702                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1703                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 1704                                 CAM_DEV_POS_PERIPH;
 1705 
 1706                         cdm->pos.cookie.bus = periph->path->bus;
 1707                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1708                                 xsoftc.bus_generation;
 1709                         cdm->pos.cookie.target = periph->path->target;
 1710                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1711                                 periph->path->bus->generation;
 1712                         cdm->pos.cookie.device = periph->path->device;
 1713                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1714                                 periph->path->target->generation;
 1715                         cdm->pos.cookie.periph = periph;
 1716                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1717                                 periph->path->device->generation;
 1718                         cdm->status = CAM_DEV_MATCH_MORE;
 1719                         return(0);
 1720                 }
 1721 
 1722                 j = cdm->num_matches;
 1723                 cdm->num_matches++;
 1724                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1725                 cdm->matches[j].result.periph_result.path_id =
 1726                         periph->path->bus->path_id;
 1727                 cdm->matches[j].result.periph_result.target_id =
 1728                         periph->path->target->target_id;
 1729                 cdm->matches[j].result.periph_result.target_lun =
 1730                         periph->path->device->lun_id;
 1731                 cdm->matches[j].result.periph_result.unit_number =
 1732                         periph->unit_number;
 1733                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1734                         periph->periph_name, DEV_IDLEN);
 1735         }
 1736 
 1737         return(1);
 1738 }
 1739 
 1740 static int
 1741 xptedtmatch(struct ccb_dev_match *cdm)
 1742 {
 1743         int ret;
 1744 
 1745         cdm->num_matches = 0;
 1746 
 1747         /*
 1748          * Check the bus list generation.  If it has changed, the user
 1749          * needs to reset everything and start over.
 1750          */
 1751         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1752          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 1753          && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
 1754                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1755                 return(0);
 1756         }
 1757 
 1758         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1759          && (cdm->pos.cookie.bus != NULL))
 1760                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 1761                                      xptedtbusfunc, cdm);
 1762         else
 1763                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 1764 
 1765         /*
 1766          * If we get back 0, that means that we had to stop before fully
 1767          * traversing the EDT.  It also means that one of the subroutines
 1768          * has set the status field to the proper value.  If we get back 1,
 1769          * we've fully traversed the EDT and copied out any matching entries.
 1770          */
 1771         if (ret == 1)
 1772                 cdm->status = CAM_DEV_MATCH_LAST;
 1773 
 1774         return(ret);
 1775 }
 1776 
 1777 static int
 1778 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 1779 {
 1780         struct ccb_dev_match *cdm;
 1781 
 1782         cdm = (struct ccb_dev_match *)arg;
 1783 
 1784         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1785          && (cdm->pos.cookie.pdrv == pdrv)
 1786          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1787          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 1788          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1789              (*pdrv)->generation)) {
 1790                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1791                 return(0);
 1792         }
 1793 
 1794         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1795          && (cdm->pos.cookie.pdrv == pdrv)
 1796          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1797          && (cdm->pos.cookie.periph != NULL))
 1798                 return(xptpdperiphtraverse(pdrv,
 1799                                 (struct cam_periph *)cdm->pos.cookie.periph,
 1800                                 xptplistperiphfunc, arg));
 1801         else
 1802                 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
 1803 }
 1804 
 1805 static int
 1806 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 1807 {
 1808         struct ccb_dev_match *cdm;
 1809         dev_match_ret retval;
 1810 
 1811         cdm = (struct ccb_dev_match *)arg;
 1812 
 1813         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1814 
 1815         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1816                 cdm->status = CAM_DEV_MATCH_ERROR;
 1817                 return(0);
 1818         }
 1819 
 1820         /*
 1821          * If the copy flag is set, copy this peripheral out.
 1822          */
 1823         if (retval & DM_RET_COPY) {
 1824                 int spaceleft, j;
 1825 
 1826                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1827                         sizeof(struct dev_match_result));
 1828 
 1829                 /*
 1830                  * If we don't have enough space to put in another
 1831                  * match result, save our position and tell the
 1832                  * user there are more devices to check.
 1833                  */
 1834                 if (spaceleft < sizeof(struct dev_match_result)) {
 1835                         struct periph_driver **pdrv;
 1836 
 1837                         pdrv = NULL;
 1838                         bzero(&cdm->pos, sizeof(cdm->pos));
 1839                         cdm->pos.position_type =
 1840                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 1841                                 CAM_DEV_POS_PERIPH;
 1842 
 1843                         /*
 1844                          * This may look a bit non-sensical, but it is
 1845                          * actually quite logical.  There are very few
 1846                          * peripheral drivers, and bloating every peripheral
 1847                          * structure with a pointer back to its parent
 1848                          * peripheral driver linker set entry would cost
 1849                          * more in the long run than doing this quick lookup.
 1850                          */
 1851                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 1852                                 if (strcmp((*pdrv)->driver_name,
 1853                                     periph->periph_name) == 0)
 1854                                         break;
 1855                         }
 1856 
 1857                         if (*pdrv == NULL) {
 1858                                 cdm->status = CAM_DEV_MATCH_ERROR;
 1859                                 return(0);
 1860                         }
 1861 
 1862                         cdm->pos.cookie.pdrv = pdrv;
 1863                         /*
 1864                          * The periph generation slot does double duty, as
 1865                          * does the periph pointer slot.  They are used for
 1866                          * both edt and pdrv lookups and positioning.
 1867                          */
 1868                         cdm->pos.cookie.periph = periph;
 1869                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1870                                 (*pdrv)->generation;
 1871                         cdm->status = CAM_DEV_MATCH_MORE;
 1872                         return(0);
 1873                 }
 1874 
 1875                 j = cdm->num_matches;
 1876                 cdm->num_matches++;
 1877                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1878                 cdm->matches[j].result.periph_result.path_id =
 1879                         periph->path->bus->path_id;
 1880 
 1881                 /*
 1882                  * The transport layer peripheral doesn't have a target or
 1883                  * lun.
 1884                  */
 1885                 if (periph->path->target)
 1886                         cdm->matches[j].result.periph_result.target_id =
 1887                                 periph->path->target->target_id;
 1888                 else
 1889                         cdm->matches[j].result.periph_result.target_id = -1;
 1890 
 1891                 if (periph->path->device)
 1892                         cdm->matches[j].result.periph_result.target_lun =
 1893                                 periph->path->device->lun_id;
 1894                 else
 1895                         cdm->matches[j].result.periph_result.target_lun = -1;
 1896 
 1897                 cdm->matches[j].result.periph_result.unit_number =
 1898                         periph->unit_number;
 1899                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1900                         periph->periph_name, DEV_IDLEN);
 1901         }
 1902 
 1903         return(1);
 1904 }
 1905 
 1906 static int
 1907 xptperiphlistmatch(struct ccb_dev_match *cdm)
 1908 {
 1909         int ret;
 1910 
 1911         cdm->num_matches = 0;
 1912 
 1913         /*
 1914          * At this point in the edt traversal function, we check the bus
 1915          * list generation to make sure that no busses have been added or
 1916          * removed since the user last sent a XPT_DEV_MATCH ccb through.
 1917          * For the peripheral driver list traversal function, however, we
 1918          * don't have to worry about new peripheral driver types coming or
 1919          * going; they're in a linker set, and therefore can't change
 1920          * without a recompile.
 1921          */
 1922 
 1923         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1924          && (cdm->pos.cookie.pdrv != NULL))
 1925                 ret = xptpdrvtraverse(
 1926                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 1927                                 xptplistpdrvfunc, cdm);
 1928         else
 1929                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 1930 
 1931         /*
 1932          * If we get back 0, that means that we had to stop before fully
 1933          * traversing the peripheral driver tree.  It also means that one of
 1934          * the subroutines has set the status field to the proper value.  If
 1935          * we get back 1, we've fully traversed the EDT and copied out any
 1936          * matching entries.
 1937          */
 1938         if (ret == 1)
 1939                 cdm->status = CAM_DEV_MATCH_LAST;
 1940 
 1941         return(ret);
 1942 }
 1943 
 1944 static int
 1945 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 1946 {
 1947         struct cam_eb *bus, *next_bus;
 1948         int retval;
 1949 
 1950         retval = 1;
 1951 
 1952         mtx_lock(&xsoftc.xpt_topo_lock);
 1953         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
 1954              bus != NULL;
 1955              bus = next_bus) {
 1956                 next_bus = TAILQ_NEXT(bus, links);
 1957 
 1958                 mtx_unlock(&xsoftc.xpt_topo_lock);
 1959                 CAM_SIM_LOCK(bus->sim);
 1960                 retval = tr_func(bus, arg);
 1961                 CAM_SIM_UNLOCK(bus->sim);
 1962                 if (retval == 0)
 1963                         return(retval);
 1964                 mtx_lock(&xsoftc.xpt_topo_lock);
 1965         }
 1966         mtx_unlock(&xsoftc.xpt_topo_lock);
 1967 
 1968         return(retval);
 1969 }
 1970 
 1971 int
 1972 xpt_sim_opened(struct cam_sim *sim)
 1973 {
 1974         struct cam_eb *bus;
 1975         struct cam_et *target;
 1976         struct cam_ed *device;
 1977         struct cam_periph *periph;
 1978 
 1979         KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
 1980         mtx_assert(sim->mtx, MA_OWNED);
 1981 
 1982         mtx_lock(&xsoftc.xpt_topo_lock);
 1983         TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
 1984                 if (bus->sim != sim)
 1985                         continue;
 1986 
 1987                 TAILQ_FOREACH(target, &bus->et_entries, links) {
 1988                         TAILQ_FOREACH(device, &target->ed_entries, links) {
 1989                                 SLIST_FOREACH(periph, &device->periphs,
 1990                                     periph_links) {
 1991                                         if (periph->refcount > 0) {
 1992                                                 mtx_unlock(&xsoftc.xpt_topo_lock);
 1993                                                 return (1);
 1994                                         }
 1995                                 }
 1996                         }
 1997                 }
 1998         }
 1999 
 2000         mtx_unlock(&xsoftc.xpt_topo_lock);
 2001         return (0);
 2002 }
 2003 
 2004 static int
 2005 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2006                   xpt_targetfunc_t *tr_func, void *arg)
 2007 {
 2008         struct cam_et *target, *next_target;
 2009         int retval;
 2010 
 2011         retval = 1;
 2012         for (target = (start_target ? start_target :
 2013                        TAILQ_FIRST(&bus->et_entries));
 2014              target != NULL; target = next_target) {
 2015 
 2016                 next_target = TAILQ_NEXT(target, links);
 2017 
 2018                 retval = tr_func(target, arg);
 2019 
 2020                 if (retval == 0)
 2021                         return(retval);
 2022         }
 2023 
 2024         return(retval);
 2025 }
 2026 
 2027 static int
 2028 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2029                   xpt_devicefunc_t *tr_func, void *arg)
 2030 {
 2031         struct cam_ed *device, *next_device;
 2032         int retval;
 2033 
 2034         retval = 1;
 2035         for (device = (start_device ? start_device :
 2036                        TAILQ_FIRST(&target->ed_entries));
 2037              device != NULL;
 2038              device = next_device) {
 2039 
 2040                 next_device = TAILQ_NEXT(device, links);
 2041 
 2042                 retval = tr_func(device, arg);
 2043 
 2044                 if (retval == 0)
 2045                         return(retval);
 2046         }
 2047 
 2048         return(retval);
 2049 }
 2050 
 2051 static int
 2052 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2053                   xpt_periphfunc_t *tr_func, void *arg)
 2054 {
 2055         struct cam_periph *periph, *next_periph;
 2056         int retval;
 2057 
 2058         retval = 1;
 2059 
 2060         for (periph = (start_periph ? start_periph :
 2061                        SLIST_FIRST(&device->periphs));
 2062              periph != NULL;
 2063              periph = next_periph) {
 2064 
 2065                 next_periph = SLIST_NEXT(periph, periph_links);
 2066 
 2067                 retval = tr_func(periph, arg);
 2068                 if (retval == 0)
 2069                         return(retval);
 2070         }
 2071 
 2072         return(retval);
 2073 }
 2074 
 2075 static int
 2076 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2077                 xpt_pdrvfunc_t *tr_func, void *arg)
 2078 {
 2079         struct periph_driver **pdrv;
 2080         int retval;
 2081 
 2082         retval = 1;
 2083 
 2084         /*
 2085          * We don't traverse the peripheral driver list like we do the
 2086          * other lists, because it is a linker set, and therefore cannot be
 2087          * changed during runtime.  If the peripheral driver list is ever
 2088          * re-done to be something other than a linker set (i.e. it can
 2089          * change while the system is running), the list traversal should
 2090          * be modified to work like the other traversal functions.
 2091          */
 2092         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2093              *pdrv != NULL; pdrv++) {
 2094                 retval = tr_func(pdrv, arg);
 2095 
 2096                 if (retval == 0)
 2097                         return(retval);
 2098         }
 2099 
 2100         return(retval);
 2101 }
 2102 
 2103 static int
 2104 xptpdperiphtraverse(struct periph_driver **pdrv,
 2105                     struct cam_periph *start_periph,
 2106                     xpt_periphfunc_t *tr_func, void *arg)
 2107 {
 2108         struct cam_periph *periph, *next_periph;
 2109         int retval;
 2110 
 2111         retval = 1;
 2112 
 2113         for (periph = (start_periph ? start_periph :
 2114              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2115              periph = next_periph) {
 2116 
 2117                 next_periph = TAILQ_NEXT(periph, unit_links);
 2118 
 2119                 retval = tr_func(periph, arg);
 2120                 if (retval == 0)
 2121                         return(retval);
 2122         }
 2123         return(retval);
 2124 }
 2125 
 2126 static int
 2127 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2128 {
 2129         struct xpt_traverse_config *tr_config;
 2130 
 2131         tr_config = (struct xpt_traverse_config *)arg;
 2132 
 2133         if (tr_config->depth == XPT_DEPTH_BUS) {
 2134                 xpt_busfunc_t *tr_func;
 2135 
 2136                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2137 
 2138                 return(tr_func(bus, tr_config->tr_arg));
 2139         } else
 2140                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2141 }
 2142 
 2143 static int
 2144 xptdeftargetfunc(struct cam_et *target, void *arg)
 2145 {
 2146         struct xpt_traverse_config *tr_config;
 2147 
 2148         tr_config = (struct xpt_traverse_config *)arg;
 2149 
 2150         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2151                 xpt_targetfunc_t *tr_func;
 2152 
 2153                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2154 
 2155                 return(tr_func(target, tr_config->tr_arg));
 2156         } else
 2157                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2158 }
 2159 
 2160 static int
 2161 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2162 {
 2163         struct xpt_traverse_config *tr_config;
 2164 
 2165         tr_config = (struct xpt_traverse_config *)arg;
 2166 
 2167         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2168                 xpt_devicefunc_t *tr_func;
 2169 
 2170                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2171 
 2172                 return(tr_func(device, tr_config->tr_arg));
 2173         } else
 2174                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2175 }
 2176 
 2177 static int
 2178 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2179 {
 2180         struct xpt_traverse_config *tr_config;
 2181         xpt_periphfunc_t *tr_func;
 2182 
 2183         tr_config = (struct xpt_traverse_config *)arg;
 2184 
 2185         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2186 
 2187         /*
 2188          * Unlike the other default functions, we don't check for depth
 2189          * here.  The peripheral driver level is the last level in the EDT,
 2190          * so if we're here, we should execute the function in question.
 2191          */
 2192         return(tr_func(periph, tr_config->tr_arg));
 2193 }
 2194 
 2195 /*
 2196  * Execute the given function for every bus in the EDT.
 2197  */
 2198 static int
 2199 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2200 {
 2201         struct xpt_traverse_config tr_config;
 2202 
 2203         tr_config.depth = XPT_DEPTH_BUS;
 2204         tr_config.tr_func = tr_func;
 2205         tr_config.tr_arg = arg;
 2206 
 2207         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2208 }
 2209 
 2210 /*
 2211  * Execute the given function for every device in the EDT.
 2212  */
 2213 static int
 2214 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2215 {
 2216         struct xpt_traverse_config tr_config;
 2217 
 2218         tr_config.depth = XPT_DEPTH_DEVICE;
 2219         tr_config.tr_func = tr_func;
 2220         tr_config.tr_arg = arg;
 2221 
 2222         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2223 }
 2224 
 2225 static int
 2226 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2227 {
 2228         struct cam_path path;
 2229         struct ccb_getdev cgd;
 2230         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2231 
 2232         /*
 2233          * Don't report unconfigured devices (Wildcard devs,
 2234          * devices only for target mode, device instances
 2235          * that have been invalidated but are waiting for
 2236          * their last reference count to be released).
 2237          */
 2238         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2239                 return (1);
 2240 
 2241         xpt_compile_path(&path,
 2242                          NULL,
 2243                          device->target->bus->path_id,
 2244                          device->target->target_id,
 2245                          device->lun_id);
 2246         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2247         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2248         xpt_action((union ccb *)&cgd);
 2249         csa->callback(csa->callback_arg,
 2250                             AC_FOUND_DEVICE,
 2251                             &path, &cgd);
 2252         xpt_release_path(&path);
 2253 
 2254         return(1);
 2255 }
 2256 
 2257 static int
 2258 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2259 {
 2260         struct cam_path path;
 2261         struct ccb_pathinq cpi;
 2262         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2263 
 2264         xpt_compile_path(&path, /*periph*/NULL,
 2265                          bus->sim->path_id,
 2266                          CAM_TARGET_WILDCARD,
 2267                          CAM_LUN_WILDCARD);
 2268         xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2269         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2270         xpt_action((union ccb *)&cpi);
 2271         csa->callback(csa->callback_arg,
 2272                             AC_PATH_REGISTERED,
 2273                             &path, &cpi);
 2274         xpt_release_path(&path);
 2275 
 2276         return(1);
 2277 }
 2278 
 2279 void
 2280 xpt_action(union ccb *start_ccb)
 2281 {
 2282 
 2283         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2284 
 2285         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2286         /* Compatibility for RL-unaware code. */
 2287         if (CAM_PRIORITY_TO_RL(start_ccb->ccb_h.pinfo.priority) == 0)
 2288             start_ccb->ccb_h.pinfo.priority += CAM_PRIORITY_NORMAL - 1;
 2289         (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
 2290 }
 2291 
 2292 void
 2293 xpt_action_default(union ccb *start_ccb)
 2294 {
 2295 
 2296         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
 2297 
 2298 
 2299         switch (start_ccb->ccb_h.func_code) {
 2300         case XPT_SCSI_IO:
 2301         {
 2302                 struct cam_ed *device;
 2303 #ifdef CAMDEBUG
 2304                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 2305                 struct cam_path *path;
 2306 
 2307                 path = start_ccb->ccb_h.path;
 2308 #endif
 2309 
 2310                 /*
 2311                  * For the sake of compatibility with SCSI-1
 2312                  * devices that may not understand the identify
 2313                  * message, we include lun information in the
 2314                  * second byte of all commands.  SCSI-1 specifies
 2315                  * that luns are a 3 bit value and reserves only 3
 2316                  * bits for lun information in the CDB.  Later
 2317                  * revisions of the SCSI spec allow for more than 8
 2318                  * luns, but have deprecated lun information in the
 2319                  * CDB.  So, if the lun won't fit, we must omit.
 2320                  *
 2321                  * Also be aware that during initial probing for devices,
 2322                  * the inquiry information is unknown but initialized to 0.
 2323                  * This means that this code will be exercised while probing
 2324                  * devices with an ANSI revision greater than 2.
 2325                  */
 2326                 device = start_ccb->ccb_h.path->device;
 2327                 if (device->protocol_version <= SCSI_REV_2
 2328                  && start_ccb->ccb_h.target_lun < 8
 2329                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 2330 
 2331                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 2332                             start_ccb->ccb_h.target_lun << 5;
 2333                 }
 2334                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 2335                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
 2336                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
 2337                                        &path->device->inq_data),
 2338                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
 2339                                           cdb_str, sizeof(cdb_str))));
 2340         }
 2341         /* FALLTHROUGH */
 2342         case XPT_TARGET_IO:
 2343         case XPT_CONT_TARGET_IO:
 2344                 start_ccb->csio.sense_resid = 0;
 2345                 start_ccb->csio.resid = 0;
 2346                 /* FALLTHROUGH */
 2347         case XPT_ATA_IO:
 2348                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) {
 2349                         start_ccb->ataio.resid = 0;
 2350                 }
 2351                 /* FALLTHROUGH */
 2352         case XPT_RESET_DEV:
 2353         case XPT_ENG_EXEC:
 2354         {
 2355                 struct cam_path *path = start_ccb->ccb_h.path;
 2356                 int frozen;
 2357 
 2358                 frozen = cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 2359                 path->device->sim->devq->alloc_openings += frozen;
 2360                 if (frozen > 0)
 2361                         xpt_run_dev_allocq(path->bus);
 2362                 if (xpt_schedule_dev_sendq(path->bus, path->device))
 2363                         xpt_run_dev_sendq(path->bus);
 2364                 break;
 2365         }
 2366         case XPT_CALC_GEOMETRY:
 2367         {
 2368                 struct cam_sim *sim;
 2369 
 2370                 /* Filter out garbage */
 2371                 if (start_ccb->ccg.block_size == 0
 2372                  || start_ccb->ccg.volume_size == 0) {
 2373                         start_ccb->ccg.cylinders = 0;
 2374                         start_ccb->ccg.heads = 0;
 2375                         start_ccb->ccg.secs_per_track = 0;
 2376                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2377                         break;
 2378                 }
 2379 #ifdef PC98
 2380                 /*
 2381                  * In a PC-98 system, geometry translation depens on
 2382                  * the "real" device geometry obtained from mode page 4.
 2383                  * SCSI geometry translation is performed in the
 2384                  * initialization routine of the SCSI BIOS and the result
 2385                  * stored in host memory.  If the translation is available
 2386                  * in host memory, use it.  If not, rely on the default
 2387                  * translation the device driver performs.
 2388                  */
 2389                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 2390                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2391                         break;
 2392                 }
 2393 #endif
 2394                 sim = start_ccb->ccb_h.path->bus->sim;
 2395                 (*(sim->sim_action))(sim, start_ccb);
 2396                 break;
 2397         }
 2398         case XPT_ABORT:
 2399         {
 2400                 union ccb* abort_ccb;
 2401 
 2402                 abort_ccb = start_ccb->cab.abort_ccb;
 2403                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 2404 
 2405                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 2406                                 struct cam_ccbq *ccbq;
 2407                                 struct cam_ed *device;
 2408 
 2409                                 device = abort_ccb->ccb_h.path->device;
 2410                                 ccbq = &device->ccbq;
 2411                                 device->sim->devq->alloc_openings -= 
 2412                                     cam_ccbq_remove_ccb(ccbq, abort_ccb);
 2413                                 abort_ccb->ccb_h.status =
 2414                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2415                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2416                                 xpt_done(abort_ccb);
 2417                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2418                                 break;
 2419                         }
 2420                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 2421                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 2422                                 /*
 2423                                  * We've caught this ccb en route to
 2424                                  * the SIM.  Flag it for abort and the
 2425                                  * SIM will do so just before starting
 2426                                  * real work on the CCB.
 2427                                  */
 2428                                 abort_ccb->ccb_h.status =
 2429                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2430                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2431                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2432                                 break;
 2433                         }
 2434                 }
 2435                 if (XPT_FC_IS_QUEUED(abort_ccb)
 2436                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 2437                         /*
 2438                          * It's already completed but waiting
 2439                          * for our SWI to get to it.
 2440                          */
 2441                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 2442                         break;
 2443                 }
 2444                 /*
 2445                  * If we weren't able to take care of the abort request
 2446                  * in the XPT, pass the request down to the SIM for processing.
 2447                  */
 2448         }
 2449         /* FALLTHROUGH */
 2450         case XPT_ACCEPT_TARGET_IO:
 2451         case XPT_EN_LUN:
 2452         case XPT_IMMED_NOTIFY:
 2453         case XPT_NOTIFY_ACK:
 2454         case XPT_RESET_BUS:
 2455         case XPT_IMMEDIATE_NOTIFY:
 2456         case XPT_NOTIFY_ACKNOWLEDGE:
 2457         case XPT_GET_SIM_KNOB:
 2458         case XPT_SET_SIM_KNOB:
 2459         {
 2460                 struct cam_sim *sim;
 2461 
 2462                 sim = start_ccb->ccb_h.path->bus->sim;
 2463                 (*(sim->sim_action))(sim, start_ccb);
 2464                 break;
 2465         }
 2466         case XPT_PATH_INQ:
 2467         {
 2468                 struct cam_sim *sim;
 2469 
 2470                 sim = start_ccb->ccb_h.path->bus->sim;
 2471                 (*(sim->sim_action))(sim, start_ccb);
 2472                 break;
 2473         }
 2474         case XPT_PATH_STATS:
 2475                 start_ccb->cpis.last_reset =
 2476                         start_ccb->ccb_h.path->bus->last_reset;
 2477                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2478                 break;
 2479         case XPT_GDEV_TYPE:
 2480         {
 2481                 struct cam_ed *dev;
 2482 
 2483                 dev = start_ccb->ccb_h.path->device;
 2484                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2485                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2486                 } else {
 2487                         struct ccb_getdev *cgd;
 2488                         struct cam_eb *bus;
 2489                         struct cam_et *tar;
 2490 
 2491                         cgd = &start_ccb->cgd;
 2492                         bus = cgd->ccb_h.path->bus;
 2493                         tar = cgd->ccb_h.path->target;
 2494                         cgd->protocol = dev->protocol;
 2495                         cgd->inq_data = dev->inq_data;
 2496                         cgd->ident_data = dev->ident_data;
 2497                         cgd->inq_flags = dev->inq_flags;
 2498                         cgd->ccb_h.status = CAM_REQ_CMP;
 2499                         cgd->serial_num_len = dev->serial_num_len;
 2500                         if ((dev->serial_num_len > 0)
 2501                          && (dev->serial_num != NULL))
 2502                                 bcopy(dev->serial_num, cgd->serial_num,
 2503                                       dev->serial_num_len);
 2504                 }
 2505                 break;
 2506         }
 2507         case XPT_GDEV_STATS:
 2508         {
 2509                 struct cam_ed *dev;
 2510 
 2511                 dev = start_ccb->ccb_h.path->device;
 2512                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2513                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2514                 } else {
 2515                         struct ccb_getdevstats *cgds;
 2516                         struct cam_eb *bus;
 2517                         struct cam_et *tar;
 2518 
 2519                         cgds = &start_ccb->cgds;
 2520                         bus = cgds->ccb_h.path->bus;
 2521                         tar = cgds->ccb_h.path->target;
 2522                         cgds->dev_openings = dev->ccbq.dev_openings;
 2523                         cgds->dev_active = dev->ccbq.dev_active;
 2524                         cgds->devq_openings = dev->ccbq.devq_openings;
 2525                         cgds->devq_queued = dev->ccbq.queue.entries;
 2526                         cgds->held = dev->ccbq.held;
 2527                         cgds->last_reset = tar->last_reset;
 2528                         cgds->maxtags = dev->maxtags;
 2529                         cgds->mintags = dev->mintags;
 2530                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 2531                                 cgds->last_reset = bus->last_reset;
 2532                         cgds->ccb_h.status = CAM_REQ_CMP;
 2533                 }
 2534                 break;
 2535         }
 2536         case XPT_GDEVLIST:
 2537         {
 2538                 struct cam_periph       *nperiph;
 2539                 struct periph_list      *periph_head;
 2540                 struct ccb_getdevlist   *cgdl;
 2541                 u_int                   i;
 2542                 struct cam_ed           *device;
 2543                 int                     found;
 2544 
 2545 
 2546                 found = 0;
 2547 
 2548                 /*
 2549                  * Don't want anyone mucking with our data.
 2550                  */
 2551                 device = start_ccb->ccb_h.path->device;
 2552                 periph_head = &device->periphs;
 2553                 cgdl = &start_ccb->cgdl;
 2554 
 2555                 /*
 2556                  * Check and see if the list has changed since the user
 2557                  * last requested a list member.  If so, tell them that the
 2558                  * list has changed, and therefore they need to start over
 2559                  * from the beginning.
 2560                  */
 2561                 if ((cgdl->index != 0) &&
 2562                     (cgdl->generation != device->generation)) {
 2563                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 2564                         break;
 2565                 }
 2566 
 2567                 /*
 2568                  * Traverse the list of peripherals and attempt to find
 2569                  * the requested peripheral.
 2570                  */
 2571                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 2572                      (nperiph != NULL) && (i <= cgdl->index);
 2573                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 2574                         if (i == cgdl->index) {
 2575                                 strncpy(cgdl->periph_name,
 2576                                         nperiph->periph_name,
 2577                                         DEV_IDLEN);
 2578                                 cgdl->unit_number = nperiph->unit_number;
 2579                                 found = 1;
 2580                         }
 2581                 }
 2582                 if (found == 0) {
 2583                         cgdl->status = CAM_GDEVLIST_ERROR;
 2584                         break;
 2585                 }
 2586 
 2587                 if (nperiph == NULL)
 2588                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 2589                 else
 2590                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 2591 
 2592                 cgdl->index++;
 2593                 cgdl->generation = device->generation;
 2594 
 2595                 cgdl->ccb_h.status = CAM_REQ_CMP;
 2596                 break;
 2597         }
 2598         case XPT_DEV_MATCH:
 2599         {
 2600                 dev_pos_type position_type;
 2601                 struct ccb_dev_match *cdm;
 2602 
 2603                 cdm = &start_ccb->cdm;
 2604 
 2605                 /*
 2606                  * There are two ways of getting at information in the EDT.
 2607                  * The first way is via the primary EDT tree.  It starts
 2608                  * with a list of busses, then a list of targets on a bus,
 2609                  * then devices/luns on a target, and then peripherals on a
 2610                  * device/lun.  The "other" way is by the peripheral driver
 2611                  * lists.  The peripheral driver lists are organized by
 2612                  * peripheral driver.  (obviously)  So it makes sense to
 2613                  * use the peripheral driver list if the user is looking
 2614                  * for something like "da1", or all "da" devices.  If the
 2615                  * user is looking for something on a particular bus/target
 2616                  * or lun, it's generally better to go through the EDT tree.
 2617                  */
 2618 
 2619                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 2620                         position_type = cdm->pos.position_type;
 2621                 else {
 2622                         u_int i;
 2623 
 2624                         position_type = CAM_DEV_POS_NONE;
 2625 
 2626                         for (i = 0; i < cdm->num_patterns; i++) {
 2627                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 2628                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 2629                                         position_type = CAM_DEV_POS_EDT;
 2630                                         break;
 2631                                 }
 2632                         }
 2633 
 2634                         if (cdm->num_patterns == 0)
 2635                                 position_type = CAM_DEV_POS_EDT;
 2636                         else if (position_type == CAM_DEV_POS_NONE)
 2637                                 position_type = CAM_DEV_POS_PDRV;
 2638                 }
 2639 
 2640                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
 2641                 case CAM_DEV_POS_EDT:
 2642                         xptedtmatch(cdm);
 2643                         break;
 2644                 case CAM_DEV_POS_PDRV:
 2645                         xptperiphlistmatch(cdm);
 2646                         break;
 2647                 default:
 2648                         cdm->status = CAM_DEV_MATCH_ERROR;
 2649                         break;
 2650                 }
 2651 
 2652                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 2653                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2654                 else
 2655                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2656 
 2657                 break;
 2658         }
 2659         case XPT_SASYNC_CB:
 2660         {
 2661                 struct ccb_setasync *csa;
 2662                 struct async_node *cur_entry;
 2663                 struct async_list *async_head;
 2664                 u_int32_t added;
 2665 
 2666                 csa = &start_ccb->csa;
 2667                 added = csa->event_enable;
 2668                 async_head = &csa->ccb_h.path->device->asyncs;
 2669 
 2670                 /*
 2671                  * If there is already an entry for us, simply
 2672                  * update it.
 2673                  */
 2674                 cur_entry = SLIST_FIRST(async_head);
 2675                 while (cur_entry != NULL) {
 2676                         if ((cur_entry->callback_arg == csa->callback_arg)
 2677                          && (cur_entry->callback == csa->callback))
 2678                                 break;
 2679                         cur_entry = SLIST_NEXT(cur_entry, links);
 2680                 }
 2681 
 2682                 if (cur_entry != NULL) {
 2683                         /*
 2684                          * If the request has no flags set,
 2685                          * remove the entry.
 2686                          */
 2687                         added &= ~cur_entry->event_enable;
 2688                         if (csa->event_enable == 0) {
 2689                                 SLIST_REMOVE(async_head, cur_entry,
 2690                                              async_node, links);
 2691                                 xpt_release_device(csa->ccb_h.path->device);
 2692                                 free(cur_entry, M_CAMXPT);
 2693                         } else {
 2694                                 cur_entry->event_enable = csa->event_enable;
 2695                         }
 2696                         csa->event_enable = added;
 2697                 } else {
 2698                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 2699                                            M_NOWAIT);
 2700                         if (cur_entry == NULL) {
 2701                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 2702                                 break;
 2703                         }
 2704                         cur_entry->event_enable = csa->event_enable;
 2705                         cur_entry->callback_arg = csa->callback_arg;
 2706                         cur_entry->callback = csa->callback;
 2707                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 2708                         xpt_acquire_device(csa->ccb_h.path->device);
 2709                 }
 2710                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2711                 break;
 2712         }
 2713         case XPT_REL_SIMQ:
 2714         {
 2715                 struct ccb_relsim *crs;
 2716                 struct cam_ed *dev;
 2717 
 2718                 crs = &start_ccb->crs;
 2719                 dev = crs->ccb_h.path->device;
 2720                 if (dev == NULL) {
 2721 
 2722                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 2723                         break;
 2724                 }
 2725 
 2726                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 2727 
 2728                         if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
 2729                                 /* Don't ever go below one opening */
 2730                                 if (crs->openings > 0) {
 2731                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
 2732                                                             crs->openings);
 2733 
 2734                                         if (bootverbose) {
 2735                                                 xpt_print(crs->ccb_h.path,
 2736                                                     "tagged openings now %d\n",
 2737                                                     crs->openings);
 2738                                         }
 2739                                 }
 2740                         }
 2741                 }
 2742 
 2743                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 2744 
 2745                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 2746 
 2747                                 /*
 2748                                  * Just extend the old timeout and decrement
 2749                                  * the freeze count so that a single timeout
 2750                                  * is sufficient for releasing the queue.
 2751                                  */
 2752                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2753                                 callout_stop(&dev->callout);
 2754                         } else {
 2755 
 2756                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2757                         }
 2758 
 2759                         callout_reset(&dev->callout,
 2760                             (crs->release_timeout * hz) / 1000,
 2761                             xpt_release_devq_timeout, dev);
 2762 
 2763                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 2764 
 2765                 }
 2766 
 2767                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 2768 
 2769                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 2770                                 /*
 2771                                  * Decrement the freeze count so that a single
 2772                                  * completion is still sufficient to unfreeze
 2773                                  * the queue.
 2774                                  */
 2775                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2776                         } else {
 2777 
 2778                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 2779                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2780                         }
 2781                 }
 2782 
 2783                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 2784 
 2785                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 2786                          || (dev->ccbq.dev_active == 0)) {
 2787 
 2788                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2789                         } else {
 2790 
 2791                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 2792                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2793                         }
 2794                 }
 2795 
 2796                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 2797                         xpt_release_devq_rl(crs->ccb_h.path, /*runlevel*/
 2798                             (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
 2799                                 crs->release_timeout : 0,
 2800                             /*count*/1, /*run_queue*/TRUE);
 2801                 }
 2802                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt[0];
 2803                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2804                 break;
 2805         }
 2806         case XPT_DEBUG: {
 2807 #ifdef CAMDEBUG
 2808 #ifdef CAM_DEBUG_DELAY
 2809                 cam_debug_delay = CAM_DEBUG_DELAY;
 2810 #endif
 2811                 cam_dflags = start_ccb->cdbg.flags;
 2812                 if (cam_dpath != NULL) {
 2813                         xpt_free_path(cam_dpath);
 2814                         cam_dpath = NULL;
 2815                 }
 2816 
 2817                 if (cam_dflags != CAM_DEBUG_NONE) {
 2818                         if (xpt_create_path(&cam_dpath, xpt_periph,
 2819                                             start_ccb->ccb_h.path_id,
 2820                                             start_ccb->ccb_h.target_id,
 2821                                             start_ccb->ccb_h.target_lun) !=
 2822                                             CAM_REQ_CMP) {
 2823                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2824                                 cam_dflags = CAM_DEBUG_NONE;
 2825                         } else {
 2826                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2827                                 xpt_print(cam_dpath, "debugging flags now %x\n",
 2828                                     cam_dflags);
 2829                         }
 2830                 } else {
 2831                         cam_dpath = NULL;
 2832                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2833                 }
 2834 #else /* !CAMDEBUG */
 2835                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 2836 #endif /* CAMDEBUG */
 2837                 break;
 2838         }
 2839         case XPT_FREEZE_QUEUE:
 2840         {
 2841                 struct ccb_relsim *crs = &start_ccb->crs;
 2842 
 2843                 xpt_freeze_devq_rl(crs->ccb_h.path, /*runlevel*/
 2844                     (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
 2845                     crs->release_timeout : 0, /*count*/1);
 2846                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2847                 break;
 2848         }
 2849         case XPT_NOOP:
 2850                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 2851                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
 2852                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2853                 break;
 2854         default:
 2855         case XPT_SDEV_TYPE:
 2856         case XPT_TERM_IO:
 2857         case XPT_ENG_INQ:
 2858                 /* XXX Implement */
 2859                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 2860                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
 2861                         xpt_done(start_ccb);
 2862                 }
 2863                 break;
 2864         }
 2865 }
 2866 
 2867 void
 2868 xpt_polled_action(union ccb *start_ccb)
 2869 {
 2870         u_int32_t timeout;
 2871         struct    cam_sim *sim;
 2872         struct    cam_devq *devq;
 2873         struct    cam_ed *dev;
 2874 
 2875 
 2876         timeout = start_ccb->ccb_h.timeout;
 2877         sim = start_ccb->ccb_h.path->bus->sim;
 2878         devq = sim->devq;
 2879         dev = start_ccb->ccb_h.path->device;
 2880 
 2881         mtx_assert(sim->mtx, MA_OWNED);
 2882 
 2883         /*
 2884          * Steal an opening so that no other queued requests
 2885          * can get it before us while we simulate interrupts.
 2886          */
 2887         dev->ccbq.devq_openings--;
 2888         dev->ccbq.dev_openings--;
 2889 
 2890         while(((devq != NULL && devq->send_openings <= 0) ||
 2891            dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
 2892                 DELAY(1000);
 2893                 (*(sim->sim_poll))(sim);
 2894                 camisr_runqueue(&sim->sim_doneq);
 2895         }
 2896 
 2897         dev->ccbq.devq_openings++;
 2898         dev->ccbq.dev_openings++;
 2899 
 2900         if (timeout != 0) {
 2901                 xpt_action(start_ccb);
 2902                 while(--timeout > 0) {
 2903                         (*(sim->sim_poll))(sim);
 2904                         camisr_runqueue(&sim->sim_doneq);
 2905                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 2906                             != CAM_REQ_INPROG)
 2907                                 break;
 2908                         DELAY(1000);
 2909                 }
 2910                 if (timeout == 0) {
 2911                         /*
 2912                          * XXX Is it worth adding a sim_timeout entry
 2913                          * point so we can attempt recovery?  If
 2914                          * this is only used for dumps, I don't think
 2915                          * it is.
 2916                          */
 2917                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 2918                 }
 2919         } else {
 2920                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2921         }
 2922 }
 2923 
 2924 /*
 2925  * Schedule a peripheral driver to receive a ccb when it's
 2926  * target device has space for more transactions.
 2927  */
 2928 void
 2929 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 2930 {
 2931         struct cam_ed *device;
 2932         int runq = 0;
 2933 
 2934         mtx_assert(perph->sim->mtx, MA_OWNED);
 2935 
 2936         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 2937         device = perph->path->device;
 2938         if (periph_is_queued(perph)) {
 2939                 /* Simply reorder based on new priority */
 2940                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 2941                           ("   change priority to %d\n", new_priority));
 2942                 if (new_priority < perph->pinfo.priority) {
 2943                         camq_change_priority(&device->drvq,
 2944                                              perph->pinfo.index,
 2945                                              new_priority);
 2946                         runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 2947                 }
 2948         } else {
 2949                 /* New entry on the queue */
 2950                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 2951                           ("   added periph to queue\n"));
 2952                 perph->pinfo.priority = new_priority;
 2953                 perph->pinfo.generation = ++device->drvq.generation;
 2954                 camq_insert(&device->drvq, &perph->pinfo);
 2955                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 2956         }
 2957         if (runq != 0) {
 2958                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 2959                           ("   calling xpt_run_devq\n"));
 2960                 xpt_run_dev_allocq(perph->path->bus);
 2961         }
 2962 }
 2963 
 2964 
 2965 /*
 2966  * Schedule a device to run on a given queue.
 2967  * If the device was inserted as a new entry on the queue,
 2968  * return 1 meaning the device queue should be run. If we
 2969  * were already queued, implying someone else has already
 2970  * started the queue, return 0 so the caller doesn't attempt
 2971  * to run the queue.
 2972  */
 2973 int
 2974 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 2975                  u_int32_t new_priority)
 2976 {
 2977         int retval;
 2978         u_int32_t old_priority;
 2979 
 2980         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 2981 
 2982         old_priority = pinfo->priority;
 2983 
 2984         /*
 2985          * Are we already queued?
 2986          */
 2987         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 2988                 /* Simply reorder based on new priority */
 2989                 if (new_priority < old_priority) {
 2990                         camq_change_priority(queue, pinfo->index,
 2991                                              new_priority);
 2992                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 2993                                         ("changed priority to %d\n",
 2994                                          new_priority));
 2995                         retval = 1;
 2996                 } else
 2997                         retval = 0;
 2998         } else {
 2999                 /* New entry on the queue */
 3000                 if (new_priority < old_priority)
 3001                         pinfo->priority = new_priority;
 3002 
 3003                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3004                                 ("Inserting onto queue\n"));
 3005                 pinfo->generation = ++queue->generation;
 3006                 camq_insert(queue, pinfo);
 3007                 retval = 1;
 3008         }
 3009         return (retval);
 3010 }
 3011 
 3012 static void
 3013 xpt_run_dev_allocq(struct cam_eb *bus)
 3014 {
 3015         struct  cam_devq *devq;
 3016 
 3017         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3018         devq = bus->sim->devq;
 3019 
 3020         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3021                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3022                          "openings == %d, active == %d\n",
 3023                          devq->alloc_queue.qfrozen_cnt[0],
 3024                          devq->alloc_queue.entries,
 3025                          devq->alloc_openings,
 3026                          devq->alloc_active));
 3027 
 3028         devq->alloc_queue.qfrozen_cnt[0]++;
 3029         while ((devq->alloc_queue.entries > 0)
 3030             && (devq->alloc_openings > 0)
 3031             && (devq->alloc_queue.qfrozen_cnt[0] <= 1)) {
 3032                 struct  cam_ed_qinfo *qinfo;
 3033                 struct  cam_ed *device;
 3034                 union   ccb *work_ccb;
 3035                 struct  cam_periph *drv;
 3036                 struct  camq *drvq;
 3037 
 3038                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3039                                                            CAMQ_HEAD);
 3040                 device = qinfo->device;
 3041                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3042                                 ("running device %p\n", device));
 3043 
 3044                 drvq = &device->drvq;
 3045 
 3046 #ifdef CAMDEBUG
 3047                 if (drvq->entries <= 0) {
 3048                         panic("xpt_run_dev_allocq: "
 3049                               "Device on queue without any work to do");
 3050                 }
 3051 #endif
 3052                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3053                         devq->alloc_openings--;
 3054                         devq->alloc_active++;
 3055                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3056                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3057                                       drv->pinfo.priority);
 3058                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3059                                         ("calling periph start\n"));
 3060                         drv->periph_start(drv, work_ccb);
 3061                 } else {
 3062                         /*
 3063                          * Malloc failure in alloc_ccb
 3064                          */
 3065                         /*
 3066                          * XXX add us to a list to be run from free_ccb
 3067                          * if we don't have any ccbs active on this
 3068                          * device queue otherwise we may never get run
 3069                          * again.
 3070                          */
 3071                         break;
 3072                 }
 3073 
 3074                 /* We may have more work. Attempt to reschedule. */
 3075                 xpt_schedule_dev_allocq(bus, device);
 3076         }
 3077         devq->alloc_queue.qfrozen_cnt[0]--;
 3078 }
 3079 
 3080 static void
 3081 xpt_run_dev_sendq(struct cam_eb *bus)
 3082 {
 3083         struct  cam_devq *devq;
 3084 
 3085         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3086 
 3087         devq = bus->sim->devq;
 3088 
 3089         devq->send_queue.qfrozen_cnt[0]++;
 3090         while ((devq->send_queue.entries > 0)
 3091             && (devq->send_openings > 0)
 3092             && (devq->send_queue.qfrozen_cnt[0] <= 1)) {
 3093                 struct  cam_ed_qinfo *qinfo;
 3094                 struct  cam_ed *device;
 3095                 union ccb *work_ccb;
 3096                 struct  cam_sim *sim;
 3097 
 3098                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3099                                                            CAMQ_HEAD);
 3100                 device = qinfo->device;
 3101                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3102                                 ("running device %p\n", device));
 3103 
 3104                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3105                 if (work_ccb == NULL) {
 3106                         printf("device on run queue with no ccbs???\n");
 3107                         continue;
 3108                 }
 3109 
 3110                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3111 
 3112                         mtx_lock(&xsoftc.xpt_lock);
 3113                         if (xsoftc.num_highpower <= 0) {
 3114                                 /*
 3115                                  * We got a high power command, but we
 3116                                  * don't have any available slots.  Freeze
 3117                                  * the device queue until we have a slot
 3118                                  * available.
 3119                                  */
 3120                                 xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3121                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
 3122                                                    &work_ccb->ccb_h,
 3123                                                    xpt_links.stqe);
 3124 
 3125                                 mtx_unlock(&xsoftc.xpt_lock);
 3126                                 continue;
 3127                         } else {
 3128                                 /*
 3129                                  * Consume a high power slot while
 3130                                  * this ccb runs.
 3131                                  */
 3132                                 xsoftc.num_highpower--;
 3133                         }
 3134                         mtx_unlock(&xsoftc.xpt_lock);
 3135                 }
 3136                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3137                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3138 
 3139                 devq->send_openings--;
 3140                 devq->send_active++;
 3141 
 3142                 xpt_schedule_dev_sendq(bus, device);
 3143 
 3144                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
 3145                         /*
 3146                          * The client wants to freeze the queue
 3147                          * after this CCB is sent.
 3148                          */
 3149                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3150                 }
 3151 
 3152                 /* In Target mode, the peripheral driver knows best... */
 3153                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3154                         if ((device->inq_flags & SID_CmdQue) != 0
 3155                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3156                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3157                         else
 3158                                 /*
 3159                                  * Clear this in case of a retried CCB that
 3160                                  * failed due to a rejected tag.
 3161                                  */
 3162                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3163                 }
 3164 
 3165                 /*
 3166                  * Device queues can be shared among multiple sim instances
 3167                  * that reside on different busses.  Use the SIM in the queue
 3168                  * CCB's path, rather than the one in the bus that was passed
 3169                  * into this function.
 3170                  */
 3171                 sim = work_ccb->ccb_h.path->bus->sim;
 3172                 (*(sim->sim_action))(sim, work_ccb);
 3173         }
 3174         devq->send_queue.qfrozen_cnt[0]--;
 3175 }
 3176 
 3177 /*
 3178  * This function merges stuff from the slave ccb into the master ccb, while
 3179  * keeping important fields in the master ccb constant.
 3180  */
 3181 void
 3182 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3183 {
 3184 
 3185         /*
 3186          * Pull fields that are valid for peripheral drivers to set
 3187          * into the master CCB along with the CCB "payload".
 3188          */
 3189         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3190         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3191         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3192         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 3193         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3194               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3195 }
 3196 
 3197 void
 3198 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3199 {
 3200 
 3201         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3202         ccb_h->pinfo.priority = priority;
 3203         ccb_h->path = path;
 3204         ccb_h->path_id = path->bus->path_id;
 3205         if (path->target)
 3206                 ccb_h->target_id = path->target->target_id;
 3207         else
 3208                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3209         if (path->device) {
 3210                 ccb_h->target_lun = path->device->lun_id;
 3211                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3212         } else {
 3213                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3214         }
 3215         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3216         ccb_h->flags = 0;
 3217 }
 3218 
 3219 /* Path manipulation functions */
 3220 cam_status
 3221 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3222                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3223 {
 3224         struct     cam_path *path;
 3225         cam_status status;
 3226 
 3227         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 3228 
 3229         if (path == NULL) {
 3230                 status = CAM_RESRC_UNAVAIL;
 3231                 return(status);
 3232         }
 3233         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3234         if (status != CAM_REQ_CMP) {
 3235                 free(path, M_CAMXPT);
 3236                 path = NULL;
 3237         }
 3238         *new_path_ptr = path;
 3239         return (status);
 3240 }
 3241 
 3242 cam_status
 3243 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
 3244                          struct cam_periph *periph, path_id_t path_id,
 3245                          target_id_t target_id, lun_id_t lun_id)
 3246 {
 3247         struct     cam_path *path;
 3248         struct     cam_eb *bus = NULL;
 3249         cam_status status;
 3250         int        need_unlock = 0;
 3251 
 3252         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK);
 3253 
 3254         if (path_id != CAM_BUS_WILDCARD) {
 3255                 bus = xpt_find_bus(path_id);
 3256                 if (bus != NULL) {
 3257                         need_unlock = 1;
 3258                         CAM_SIM_LOCK(bus->sim);
 3259                 }
 3260         }
 3261         status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
 3262         if (need_unlock)
 3263                 CAM_SIM_UNLOCK(bus->sim);
 3264         if (status != CAM_REQ_CMP) {
 3265                 free(path, M_CAMXPT);
 3266                 path = NULL;
 3267         }
 3268         *new_path_ptr = path;
 3269         return (status);
 3270 }
 3271 
 3272 cam_status
 3273 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 3274                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3275 {
 3276         struct       cam_eb *bus;
 3277         struct       cam_et *target;
 3278         struct       cam_ed *device;
 3279         cam_status   status;
 3280 
 3281         status = CAM_REQ_CMP;   /* Completed without error */
 3282         target = NULL;          /* Wildcarded */
 3283         device = NULL;          /* Wildcarded */
 3284 
 3285         /*
 3286          * We will potentially modify the EDT, so block interrupts
 3287          * that may attempt to create cam paths.
 3288          */
 3289         bus = xpt_find_bus(path_id);
 3290         if (bus == NULL) {
 3291                 status = CAM_PATH_INVALID;
 3292         } else {
 3293                 target = xpt_find_target(bus, target_id);
 3294                 if (target == NULL) {
 3295                         /* Create one */
 3296                         struct cam_et *new_target;
 3297 
 3298                         new_target = xpt_alloc_target(bus, target_id);
 3299                         if (new_target == NULL) {
 3300                                 status = CAM_RESRC_UNAVAIL;
 3301                         } else {
 3302                                 target = new_target;
 3303                         }
 3304                 }
 3305                 if (target != NULL) {
 3306                         device = xpt_find_device(target, lun_id);
 3307                         if (device == NULL) {
 3308                                 /* Create one */
 3309                                 struct cam_ed *new_device;
 3310 
 3311                                 new_device =
 3312                                     (*(bus->xport->alloc_device))(bus,
 3313                                                                       target,
 3314                                                                       lun_id);
 3315                                 if (new_device == NULL) {
 3316                                         status = CAM_RESRC_UNAVAIL;
 3317                                 } else {
 3318                                         device = new_device;
 3319                                 }
 3320                         }
 3321                 }
 3322         }
 3323 
 3324         /*
 3325          * Only touch the user's data if we are successful.
 3326          */
 3327         if (status == CAM_REQ_CMP) {
 3328                 new_path->periph = perph;
 3329                 new_path->bus = bus;
 3330                 new_path->target = target;
 3331                 new_path->device = device;
 3332                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 3333         } else {
 3334                 if (device != NULL)
 3335                         xpt_release_device(device);
 3336                 if (target != NULL)
 3337                         xpt_release_target(target);
 3338                 if (bus != NULL)
 3339                         xpt_release_bus(bus);
 3340         }
 3341         return (status);
 3342 }
 3343 
 3344 void
 3345 xpt_release_path(struct cam_path *path)
 3346 {
 3347         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 3348         if (path->device != NULL) {
 3349                 xpt_release_device(path->device);
 3350                 path->device = NULL;
 3351         }
 3352         if (path->target != NULL) {
 3353                 xpt_release_target(path->target);
 3354                 path->target = NULL;
 3355         }
 3356         if (path->bus != NULL) {
 3357                 xpt_release_bus(path->bus);
 3358                 path->bus = NULL;
 3359         }
 3360 }
 3361 
 3362 void
 3363 xpt_free_path(struct cam_path *path)
 3364 {
 3365 
 3366         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 3367         xpt_release_path(path);
 3368         free(path, M_CAMXPT);
 3369 }
 3370 
 3371 
 3372 /*
 3373  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 3374  * in path1, 2 for match with wildcards in path2.
 3375  */
 3376 int
 3377 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 3378 {
 3379         int retval = 0;
 3380 
 3381         if (path1->bus != path2->bus) {
 3382                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 3383                         retval = 1;
 3384                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 3385                         retval = 2;
 3386                 else
 3387                         return (-1);
 3388         }
 3389         if (path1->target != path2->target) {
 3390                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 3391                         if (retval == 0)
 3392                                 retval = 1;
 3393                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 3394                         retval = 2;
 3395                 else
 3396                         return (-1);
 3397         }
 3398         if (path1->device != path2->device) {
 3399                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 3400                         if (retval == 0)
 3401                                 retval = 1;
 3402                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 3403                         retval = 2;
 3404                 else
 3405                         return (-1);
 3406         }
 3407         return (retval);
 3408 }
 3409 
 3410 void
 3411 xpt_print_path(struct cam_path *path)
 3412 {
 3413 
 3414         if (path == NULL)
 3415                 printf("(nopath): ");
 3416         else {
 3417                 if (path->periph != NULL)
 3418                         printf("(%s%d:", path->periph->periph_name,
 3419                                path->periph->unit_number);
 3420                 else
 3421                         printf("(noperiph:");
 3422 
 3423                 if (path->bus != NULL)
 3424                         printf("%s%d:%d:", path->bus->sim->sim_name,
 3425                                path->bus->sim->unit_number,
 3426                                path->bus->sim->bus_id);
 3427                 else
 3428                         printf("nobus:");
 3429 
 3430                 if (path->target != NULL)
 3431                         printf("%d:", path->target->target_id);
 3432                 else
 3433                         printf("X:");
 3434 
 3435                 if (path->device != NULL)
 3436                         printf("%d): ", path->device->lun_id);
 3437                 else
 3438                         printf("X): ");
 3439         }
 3440 }
 3441 
 3442 void
 3443 xpt_print(struct cam_path *path, const char *fmt, ...)
 3444 {
 3445         va_list ap;
 3446         xpt_print_path(path);
 3447         va_start(ap, fmt);
 3448         vprintf(fmt, ap);
 3449         va_end(ap);
 3450 }
 3451 
 3452 int
 3453 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 3454 {
 3455         struct sbuf sb;
 3456 
 3457 #ifdef INVARIANTS
 3458         if (path != NULL && path->bus != NULL)
 3459                 mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3460 #endif
 3461 
 3462         sbuf_new(&sb, str, str_len, 0);
 3463 
 3464         if (path == NULL)
 3465                 sbuf_printf(&sb, "(nopath): ");
 3466         else {
 3467                 if (path->periph != NULL)
 3468                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 3469                                     path->periph->unit_number);
 3470                 else
 3471                         sbuf_printf(&sb, "(noperiph:");
 3472 
 3473                 if (path->bus != NULL)
 3474                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 3475                                     path->bus->sim->unit_number,
 3476                                     path->bus->sim->bus_id);
 3477                 else
 3478                         sbuf_printf(&sb, "nobus:");
 3479 
 3480                 if (path->target != NULL)
 3481                         sbuf_printf(&sb, "%d:", path->target->target_id);
 3482                 else
 3483                         sbuf_printf(&sb, "X:");
 3484 
 3485                 if (path->device != NULL)
 3486                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
 3487                 else
 3488                         sbuf_printf(&sb, "X): ");
 3489         }
 3490         sbuf_finish(&sb);
 3491 
 3492         return(sbuf_len(&sb));
 3493 }
 3494 
 3495 path_id_t
 3496 xpt_path_path_id(struct cam_path *path)
 3497 {
 3498         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3499 
 3500         return(path->bus->path_id);
 3501 }
 3502 
 3503 target_id_t
 3504 xpt_path_target_id(struct cam_path *path)
 3505 {
 3506         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3507 
 3508         if (path->target != NULL)
 3509                 return (path->target->target_id);
 3510         else
 3511                 return (CAM_TARGET_WILDCARD);
 3512 }
 3513 
 3514 lun_id_t
 3515 xpt_path_lun_id(struct cam_path *path)
 3516 {
 3517         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3518 
 3519         if (path->device != NULL)
 3520                 return (path->device->lun_id);
 3521         else
 3522                 return (CAM_LUN_WILDCARD);
 3523 }
 3524 
 3525 struct cam_sim *
 3526 xpt_path_sim(struct cam_path *path)
 3527 {
 3528 
 3529         return (path->bus->sim);
 3530 }
 3531 
 3532 struct cam_periph*
 3533 xpt_path_periph(struct cam_path *path)
 3534 {
 3535         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3536 
 3537         return (path->periph);
 3538 }
 3539 
 3540 /*
 3541  * Release a CAM control block for the caller.  Remit the cost of the structure
 3542  * to the device referenced by the path.  If the this device had no 'credits'
 3543  * and peripheral drivers have registered async callbacks for this notification
 3544  * call them now.
 3545  */
 3546 void
 3547 xpt_release_ccb(union ccb *free_ccb)
 3548 {
 3549         struct   cam_path *path;
 3550         struct   cam_ed *device;
 3551         struct   cam_eb *bus;
 3552         struct   cam_sim *sim;
 3553 
 3554         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 3555         path = free_ccb->ccb_h.path;
 3556         device = path->device;
 3557         bus = path->bus;
 3558         sim = bus->sim;
 3559 
 3560         mtx_assert(sim->mtx, MA_OWNED);
 3561 
 3562         cam_ccbq_release_opening(&device->ccbq);
 3563         if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) {
 3564                 device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
 3565                 cam_ccbq_resize(&device->ccbq,
 3566                     device->ccbq.dev_openings + device->ccbq.dev_active);
 3567         }
 3568         if (sim->ccb_count > sim->max_ccbs) {
 3569                 xpt_free_ccb(free_ccb);
 3570                 sim->ccb_count--;
 3571         } else {
 3572                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
 3573                     xpt_links.sle);
 3574         }
 3575         if (sim->devq == NULL) {
 3576                 return;
 3577         }
 3578         sim->devq->alloc_openings++;
 3579         sim->devq->alloc_active--;
 3580         if (device_is_alloc_queued(device) == 0)
 3581                 xpt_schedule_dev_allocq(bus, device);
 3582         xpt_run_dev_allocq(bus);
 3583 }
 3584 
 3585 /* Functions accessed by SIM drivers */
 3586 
 3587 static struct xpt_xport xport_default = {
 3588         .alloc_device = xpt_alloc_device_default,
 3589         .action = xpt_action_default,
 3590         .async = xpt_dev_async_default,
 3591 };
 3592 
 3593 /*
 3594  * A sim structure, listing the SIM entry points and instance
 3595  * identification info is passed to xpt_bus_register to hook the SIM
 3596  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 3597  * for this new bus and places it in the array of busses and assigns
 3598  * it a path_id.  The path_id may be influenced by "hard wiring"
 3599  * information specified by the user.  Once interrupt services are
 3600  * available, the bus will be probed.
 3601  */
 3602 int32_t
 3603 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
 3604 {
 3605         struct cam_eb *new_bus;
 3606         struct cam_eb *old_bus;
 3607         struct ccb_pathinq cpi;
 3608         struct cam_path *path;
 3609         cam_status status;
 3610 
 3611         mtx_assert(sim->mtx, MA_OWNED);
 3612 
 3613         sim->bus_id = bus;
 3614         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 3615                                           M_CAMXPT, M_NOWAIT);
 3616         if (new_bus == NULL) {
 3617                 /* Couldn't satisfy request */
 3618                 return (CAM_RESRC_UNAVAIL);
 3619         }
 3620         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 3621         if (path == NULL) {
 3622                 free(new_bus, M_CAMXPT);
 3623                 return (CAM_RESRC_UNAVAIL);
 3624         }
 3625 
 3626         if (strcmp(sim->sim_name, "xpt") != 0) {
 3627                 sim->path_id =
 3628                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 3629         }
 3630 
 3631         TAILQ_INIT(&new_bus->et_entries);
 3632         new_bus->path_id = sim->path_id;
 3633         cam_sim_hold(sim);
 3634         new_bus->sim = sim;
 3635         timevalclear(&new_bus->last_reset);
 3636         new_bus->flags = 0;
 3637         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 3638         new_bus->generation = 0;
 3639 
 3640         mtx_lock(&xsoftc.xpt_topo_lock);
 3641         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3642         while (old_bus != NULL
 3643             && old_bus->path_id < new_bus->path_id)
 3644                 old_bus = TAILQ_NEXT(old_bus, links);
 3645         if (old_bus != NULL)
 3646                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 3647         else
 3648                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
 3649         xsoftc.bus_generation++;
 3650         mtx_unlock(&xsoftc.xpt_topo_lock);
 3651 
 3652         /*
 3653          * Set a default transport so that a PATH_INQ can be issued to
 3654          * the SIM.  This will then allow for probing and attaching of
 3655          * a more appropriate transport.
 3656          */
 3657         new_bus->xport = &xport_default;
 3658 
 3659         status = xpt_compile_path(path, /*periph*/NULL, sim->path_id,
 3660                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3661         if (status != CAM_REQ_CMP)
 3662                 printf("xpt_compile_path returned %d\n", status);
 3663 
 3664         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 3665         cpi.ccb_h.func_code = XPT_PATH_INQ;
 3666         xpt_action((union ccb *)&cpi);
 3667 
 3668         if (cpi.ccb_h.status == CAM_REQ_CMP) {
 3669                 switch (cpi.transport) {
 3670                 case XPORT_SPI:
 3671                 case XPORT_SAS:
 3672                 case XPORT_FC:
 3673                 case XPORT_USB:
 3674                 case XPORT_ISCSI:
 3675                 case XPORT_PPB:
 3676                         new_bus->xport = scsi_get_xport();
 3677                         break;
 3678                 case XPORT_ATA:
 3679                 case XPORT_SATA:
 3680                         new_bus->xport = ata_get_xport();
 3681                         break;
 3682                 default:
 3683                         new_bus->xport = &xport_default;
 3684                         break;
 3685                 }
 3686         }
 3687 
 3688         /* Notify interested parties */
 3689         if (sim->path_id != CAM_XPT_PATH_ID) {
 3690                 union   ccb *scan_ccb;
 3691 
 3692                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
 3693                 /* Initiate bus rescan. */
 3694                 scan_ccb = xpt_alloc_ccb_nowait();
 3695                 scan_ccb->ccb_h.path = path;
 3696                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
 3697                 scan_ccb->crcn.flags = 0;
 3698                 xpt_rescan(scan_ccb);
 3699         } else
 3700                 xpt_free_path(path);
 3701         return (CAM_SUCCESS);
 3702 }
 3703 
 3704 int32_t
 3705 xpt_bus_deregister(path_id_t pathid)
 3706 {
 3707         struct cam_path bus_path;
 3708         cam_status status;
 3709 
 3710         status = xpt_compile_path(&bus_path, NULL, pathid,
 3711                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3712         if (status != CAM_REQ_CMP)
 3713                 return (status);
 3714 
 3715         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 3716         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 3717 
 3718         /* Release the reference count held while registered. */
 3719         xpt_release_bus(bus_path.bus);
 3720         xpt_release_path(&bus_path);
 3721 
 3722         return (CAM_REQ_CMP);
 3723 }
 3724 
 3725 static path_id_t
 3726 xptnextfreepathid(void)
 3727 {
 3728         struct cam_eb *bus;
 3729         path_id_t pathid;
 3730         const char *strval;
 3731 
 3732         pathid = 0;
 3733         mtx_lock(&xsoftc.xpt_topo_lock);
 3734         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3735 retry:
 3736         /* Find an unoccupied pathid */
 3737         while (bus != NULL && bus->path_id <= pathid) {
 3738                 if (bus->path_id == pathid)
 3739                         pathid++;
 3740                 bus = TAILQ_NEXT(bus, links);
 3741         }
 3742         mtx_unlock(&xsoftc.xpt_topo_lock);
 3743 
 3744         /*
 3745          * Ensure that this pathid is not reserved for
 3746          * a bus that may be registered in the future.
 3747          */
 3748         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 3749                 ++pathid;
 3750                 /* Start the search over */
 3751                 mtx_lock(&xsoftc.xpt_topo_lock);
 3752                 goto retry;
 3753         }
 3754         return (pathid);
 3755 }
 3756 
 3757 static path_id_t
 3758 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 3759 {
 3760         path_id_t pathid;
 3761         int i, dunit, val;
 3762         char buf[32];
 3763         const char *dname;
 3764 
 3765         pathid = CAM_XPT_PATH_ID;
 3766         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 3767         i = 0;
 3768         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 3769                 if (strcmp(dname, "scbus")) {
 3770                         /* Avoid a bit of foot shooting. */
 3771                         continue;
 3772                 }
 3773                 if (dunit < 0)          /* unwired?! */
 3774                         continue;
 3775                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 3776                         if (sim_bus == val) {
 3777                                 pathid = dunit;
 3778                                 break;
 3779                         }
 3780                 } else if (sim_bus == 0) {
 3781                         /* Unspecified matches bus 0 */
 3782                         pathid = dunit;
 3783                         break;
 3784                 } else {
 3785                         printf("Ambiguous scbus configuration for %s%d "
 3786                                "bus %d, cannot wire down.  The kernel "
 3787                                "config entry for scbus%d should "
 3788                                "specify a controller bus.\n"
 3789                                "Scbus will be assigned dynamically.\n",
 3790                                sim_name, sim_unit, sim_bus, dunit);
 3791                         break;
 3792                 }
 3793         }
 3794 
 3795         if (pathid == CAM_XPT_PATH_ID)
 3796                 pathid = xptnextfreepathid();
 3797         return (pathid);
 3798 }
 3799 
 3800 void
 3801 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 3802 {
 3803         struct cam_eb *bus;
 3804         struct cam_et *target, *next_target;
 3805         struct cam_ed *device, *next_device;
 3806 
 3807         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3808 
 3809         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
 3810 
 3811         /*
 3812          * Most async events come from a CAM interrupt context.  In
 3813          * a few cases, the error recovery code at the peripheral layer,
 3814          * which may run from our SWI or a process context, may signal
 3815          * deferred events with a call to xpt_async.
 3816          */
 3817 
 3818         bus = path->bus;
 3819 
 3820         if (async_code == AC_BUS_RESET) {
 3821                 /* Update our notion of when the last reset occurred */
 3822                 microtime(&bus->last_reset);
 3823         }
 3824 
 3825         for (target = TAILQ_FIRST(&bus->et_entries);
 3826              target != NULL;
 3827              target = next_target) {
 3828 
 3829                 next_target = TAILQ_NEXT(target, links);
 3830 
 3831                 if (path->target != target
 3832                  && path->target->target_id != CAM_TARGET_WILDCARD
 3833                  && target->target_id != CAM_TARGET_WILDCARD)
 3834                         continue;
 3835 
 3836                 if (async_code == AC_SENT_BDR) {
 3837                         /* Update our notion of when the last reset occurred */
 3838                         microtime(&path->target->last_reset);
 3839                 }
 3840 
 3841                 for (device = TAILQ_FIRST(&target->ed_entries);
 3842                      device != NULL;
 3843                      device = next_device) {
 3844 
 3845                         next_device = TAILQ_NEXT(device, links);
 3846 
 3847                         if (path->device != device
 3848                          && path->device->lun_id != CAM_LUN_WILDCARD
 3849                          && device->lun_id != CAM_LUN_WILDCARD)
 3850                                 continue;
 3851                         /*
 3852                          * The async callback could free the device.
 3853                          * If it is a broadcast async, it doesn't hold
 3854                          * device reference, so take our own reference.
 3855                          */
 3856                         xpt_acquire_device(device);
 3857                         (*(bus->xport->async))(async_code, bus,
 3858                                                target, device,
 3859                                                async_arg);
 3860 
 3861                         xpt_async_bcast(&device->asyncs, async_code,
 3862                                         path, async_arg);
 3863                         xpt_release_device(device);
 3864                 }
 3865         }
 3866 
 3867         /*
 3868          * If this wasn't a fully wildcarded async, tell all
 3869          * clients that want all async events.
 3870          */
 3871         if (bus != xpt_periph->path->bus)
 3872                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 3873                                 path, async_arg);
 3874 }
 3875 
 3876 static void
 3877 xpt_async_bcast(struct async_list *async_head,
 3878                 u_int32_t async_code,
 3879                 struct cam_path *path, void *async_arg)
 3880 {
 3881         struct async_node *cur_entry;
 3882 
 3883         cur_entry = SLIST_FIRST(async_head);
 3884         while (cur_entry != NULL) {
 3885                 struct async_node *next_entry;
 3886                 /*
 3887                  * Grab the next list entry before we call the current
 3888                  * entry's callback.  This is because the callback function
 3889                  * can delete its async callback entry.
 3890                  */
 3891                 next_entry = SLIST_NEXT(cur_entry, links);
 3892                 if ((cur_entry->event_enable & async_code) != 0)
 3893                         cur_entry->callback(cur_entry->callback_arg,
 3894                                             async_code, path,
 3895                                             async_arg);
 3896                 cur_entry = next_entry;
 3897         }
 3898 }
 3899 
 3900 static void
 3901 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
 3902                       struct cam_et *target, struct cam_ed *device,
 3903                       void *async_arg)
 3904 {
 3905         printf("%s called\n", __func__);
 3906 }
 3907 
 3908 u_int32_t
 3909 xpt_freeze_devq_rl(struct cam_path *path, cam_rl rl, u_int count)
 3910 {
 3911         struct cam_ed *dev = path->device;
 3912 
 3913         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3914         dev->sim->devq->alloc_openings +=
 3915             cam_ccbq_freeze(&dev->ccbq, rl, count);
 3916         /* Remove frozen device from allocq. */
 3917         if (device_is_alloc_queued(dev) &&
 3918             cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
 3919              CAMQ_GET_PRIO(&dev->drvq)))) {
 3920                 camq_remove(&dev->sim->devq->alloc_queue,
 3921                     dev->alloc_ccb_entry.pinfo.index);
 3922         }
 3923         /* Remove frozen device from sendq. */
 3924         if (device_is_send_queued(dev) &&
 3925             cam_ccbq_frozen_top(&dev->ccbq)) {
 3926                 camq_remove(&dev->sim->devq->send_queue,
 3927                     dev->send_ccb_entry.pinfo.index);
 3928         }
 3929         return (dev->ccbq.queue.qfrozen_cnt[rl]);
 3930 }
 3931 
 3932 u_int32_t
 3933 xpt_freeze_devq(struct cam_path *path, u_int count)
 3934 {
 3935 
 3936         return (xpt_freeze_devq_rl(path, 0, count));
 3937 }
 3938 
 3939 u_int32_t
 3940 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 3941 {
 3942 
 3943         mtx_assert(sim->mtx, MA_OWNED);
 3944         sim->devq->send_queue.qfrozen_cnt[0] += count;
 3945         return (sim->devq->send_queue.qfrozen_cnt[0]);
 3946 }
 3947 
 3948 static void
 3949 xpt_release_devq_timeout(void *arg)
 3950 {
 3951         struct cam_ed *device;
 3952 
 3953         device = (struct cam_ed *)arg;
 3954 
 3955         xpt_release_devq_device(device, /*rl*/0, /*count*/1, /*run_queue*/TRUE);
 3956 }
 3957 
 3958 void
 3959 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 3960 {
 3961         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3962 
 3963         xpt_release_devq_device(path->device, /*rl*/0, count, run_queue);
 3964 }
 3965 
 3966 void
 3967 xpt_release_devq_rl(struct cam_path *path, cam_rl rl, u_int count, int run_queue)
 3968 {
 3969         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3970 
 3971         xpt_release_devq_device(path->device, rl, count, run_queue);
 3972 }
 3973 
 3974 static void
 3975 xpt_release_devq_device(struct cam_ed *dev, cam_rl rl, u_int count, int run_queue)
 3976 {
 3977 
 3978         if (count > dev->ccbq.queue.qfrozen_cnt[rl]) {
 3979 #ifdef INVARIANTS
 3980                 printf("xpt_release_devq(%d): requested %u > present %u\n",
 3981                     rl, count, dev->ccbq.queue.qfrozen_cnt[rl]);
 3982 #endif
 3983                 count = dev->ccbq.queue.qfrozen_cnt[rl];
 3984         }
 3985         dev->sim->devq->alloc_openings -=
 3986             cam_ccbq_release(&dev->ccbq, rl, count);
 3987         if (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
 3988             CAMQ_GET_PRIO(&dev->drvq))) == 0) {
 3989                 if (xpt_schedule_dev_allocq(dev->target->bus, dev))
 3990                         xpt_run_dev_allocq(dev->target->bus);
 3991         }
 3992         if (cam_ccbq_frozen_top(&dev->ccbq) == 0) {
 3993                 /*
 3994                  * No longer need to wait for a successful
 3995                  * command completion.
 3996                  */
 3997                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 3998                 /*
 3999                  * Remove any timeouts that might be scheduled
 4000                  * to release this queue.
 4001                  */
 4002                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4003                         callout_stop(&dev->callout);
 4004                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4005                 }
 4006                 if (run_queue == 0)
 4007                         return;
 4008                 /*
 4009                  * Now that we are unfrozen schedule the
 4010                  * device so any pending transactions are
 4011                  * run.
 4012                  */
 4013                 if (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4014                         xpt_run_dev_sendq(dev->target->bus);
 4015         }
 4016 }
 4017 
 4018 void
 4019 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4020 {
 4021         struct  camq *sendq;
 4022 
 4023         mtx_assert(sim->mtx, MA_OWNED);
 4024         sendq = &(sim->devq->send_queue);
 4025         if (sendq->qfrozen_cnt[0] <= 0) {
 4026 #ifdef INVARIANTS
 4027                 printf("xpt_release_simq: requested 1 > present %u\n",
 4028                     sendq->qfrozen_cnt[0]);
 4029 #endif
 4030         } else
 4031                 sendq->qfrozen_cnt[0]--;
 4032         if (sendq->qfrozen_cnt[0] == 0) {
 4033                 /*
 4034                  * If there is a timeout scheduled to release this
 4035                  * sim queue, remove it.  The queue frozen count is
 4036                  * already at 0.
 4037                  */
 4038                 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4039                         callout_stop(&sim->callout);
 4040                         sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4041                 }
 4042                 if (run_queue) {
 4043                         struct cam_eb *bus;
 4044 
 4045                         /*
 4046                          * Now that we are unfrozen run the send queue.
 4047                          */
 4048                         bus = xpt_find_bus(sim->path_id);
 4049                         xpt_run_dev_sendq(bus);
 4050                         xpt_release_bus(bus);
 4051                 }
 4052         }
 4053 }
 4054 
 4055 /*
 4056  * XXX Appears to be unused.
 4057  */
 4058 static void
 4059 xpt_release_simq_timeout(void *arg)
 4060 {
 4061         struct cam_sim *sim;
 4062 
 4063         sim = (struct cam_sim *)arg;
 4064         xpt_release_simq(sim, /* run_queue */ TRUE);
 4065 }
 4066 
 4067 void
 4068 xpt_done(union ccb *done_ccb)
 4069 {
 4070         struct cam_sim *sim;
 4071         int     first;
 4072 
 4073         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4074         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4075                 /*
 4076                  * Queue up the request for handling by our SWI handler
 4077                  * any of the "non-immediate" type of ccbs.
 4078                  */
 4079                 sim = done_ccb->ccb_h.path->bus->sim;
 4080                 TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
 4081                     sim_links.tqe);
 4082                 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4083                 if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
 4084                         mtx_lock(&cam_simq_lock);
 4085                         first = TAILQ_EMPTY(&cam_simq);
 4086                         TAILQ_INSERT_TAIL(&cam_simq, sim, links);
 4087                         mtx_unlock(&cam_simq_lock);
 4088                         sim->flags |= CAM_SIM_ON_DONEQ;
 4089                         if (first)
 4090                                 swi_sched(cambio_ih, 0);
 4091                 }
 4092         }
 4093 }
 4094 
 4095 union ccb *
 4096 xpt_alloc_ccb()
 4097 {
 4098         union ccb *new_ccb;
 4099 
 4100         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK);
 4101         return (new_ccb);
 4102 }
 4103 
 4104 union ccb *
 4105 xpt_alloc_ccb_nowait()
 4106 {
 4107         union ccb *new_ccb;
 4108 
 4109         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT);
 4110         return (new_ccb);
 4111 }
 4112 
 4113 void
 4114 xpt_free_ccb(union ccb *free_ccb)
 4115 {
 4116         free(free_ccb, M_CAMXPT);
 4117 }
 4118 
 4119 
 4120 
 4121 /* Private XPT functions */
 4122 
 4123 /*
 4124  * Get a CAM control block for the caller. Charge the structure to the device
 4125  * referenced by the path.  If the this device has no 'credits' then the
 4126  * device already has the maximum number of outstanding operations under way
 4127  * and we return NULL. If we don't have sufficient resources to allocate more
 4128  * ccbs, we also return NULL.
 4129  */
 4130 static union ccb *
 4131 xpt_get_ccb(struct cam_ed *device)
 4132 {
 4133         union ccb *new_ccb;
 4134         struct cam_sim *sim;
 4135 
 4136         sim = device->sim;
 4137         if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
 4138                 new_ccb = xpt_alloc_ccb_nowait();
 4139                 if (new_ccb == NULL) {
 4140                         return (NULL);
 4141                 }
 4142                 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
 4143                         callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 4144                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
 4145                                   xpt_links.sle);
 4146                 sim->ccb_count++;
 4147         }
 4148         cam_ccbq_take_opening(&device->ccbq);
 4149         SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
 4150         return (new_ccb);
 4151 }
 4152 
 4153 static void
 4154 xpt_release_bus(struct cam_eb *bus)
 4155 {
 4156 
 4157         if ((--bus->refcount == 0)
 4158          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
 4159                 mtx_lock(&xsoftc.xpt_topo_lock);
 4160                 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
 4161                 xsoftc.bus_generation++;
 4162                 mtx_unlock(&xsoftc.xpt_topo_lock);
 4163                 cam_sim_release(bus->sim);
 4164                 free(bus, M_CAMXPT);
 4165         }
 4166 }
 4167 
 4168 static struct cam_et *
 4169 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4170 {
 4171         struct cam_et *target;
 4172 
 4173         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
 4174         if (target != NULL) {
 4175                 struct cam_et *cur_target;
 4176 
 4177                 TAILQ_INIT(&target->ed_entries);
 4178                 target->bus = bus;
 4179                 target->target_id = target_id;
 4180                 target->refcount = 1;
 4181                 target->generation = 0;
 4182                 timevalclear(&target->last_reset);
 4183                 /*
 4184                  * Hold a reference to our parent bus so it
 4185                  * will not go away before we do.
 4186                  */
 4187                 bus->refcount++;
 4188 
 4189                 /* Insertion sort into our bus's target list */
 4190                 cur_target = TAILQ_FIRST(&bus->et_entries);
 4191                 while (cur_target != NULL && cur_target->target_id < target_id)
 4192                         cur_target = TAILQ_NEXT(cur_target, links);
 4193 
 4194                 if (cur_target != NULL) {
 4195                         TAILQ_INSERT_BEFORE(cur_target, target, links);
 4196                 } else {
 4197                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4198                 }
 4199                 bus->generation++;
 4200         }
 4201         return (target);
 4202 }
 4203 
 4204 static void
 4205 xpt_release_target(struct cam_et *target)
 4206 {
 4207 
 4208         if ((--target->refcount == 0)
 4209          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
 4210                 TAILQ_REMOVE(&target->bus->et_entries, target, links);
 4211                 target->bus->generation++;
 4212                 xpt_release_bus(target->bus);
 4213                 free(target, M_CAMXPT);
 4214         }
 4215 }
 4216 
 4217 static struct cam_ed *
 4218 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
 4219                          lun_id_t lun_id)
 4220 {
 4221         struct cam_ed *device, *cur_device;
 4222 
 4223         device = xpt_alloc_device(bus, target, lun_id);
 4224         if (device == NULL)
 4225                 return (NULL);
 4226 
 4227         device->mintags = 1;
 4228         device->maxtags = 1;
 4229         bus->sim->max_ccbs += device->ccbq.devq_openings;
 4230         cur_device = TAILQ_FIRST(&target->ed_entries);
 4231         while (cur_device != NULL && cur_device->lun_id < lun_id)
 4232                 cur_device = TAILQ_NEXT(cur_device, links);
 4233         if (cur_device != NULL) {
 4234                 TAILQ_INSERT_BEFORE(cur_device, device, links);
 4235         } else {
 4236                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 4237         }
 4238         target->generation++;
 4239 
 4240         return (device);
 4241 }
 4242 
 4243 struct cam_ed *
 4244 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4245 {
 4246         struct     cam_ed *device;
 4247         struct     cam_devq *devq;
 4248         cam_status status;
 4249 
 4250         /* Make space for us in the device queue on our bus */
 4251         devq = bus->sim->devq;
 4252         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 4253 
 4254         if (status != CAM_REQ_CMP) {
 4255                 device = NULL;
 4256         } else {
 4257                 device = (struct cam_ed *)malloc(sizeof(*device),
 4258                                                  M_CAMXPT, M_NOWAIT);
 4259         }
 4260 
 4261         if (device != NULL) {
 4262                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 4263                 device->alloc_ccb_entry.device = device;
 4264                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
 4265                 device->send_ccb_entry.device = device;
 4266                 device->target = target;
 4267                 device->lun_id = lun_id;
 4268                 device->sim = bus->sim;
 4269                 /* Initialize our queues */
 4270                 if (camq_init(&device->drvq, 0) != 0) {
 4271                         free(device, M_CAMXPT);
 4272                         return (NULL);
 4273                 }
 4274                 if (cam_ccbq_init(&device->ccbq,
 4275                                   bus->sim->max_dev_openings) != 0) {
 4276                         camq_fini(&device->drvq);
 4277                         free(device, M_CAMXPT);
 4278                         return (NULL);
 4279                 }
 4280                 SLIST_INIT(&device->asyncs);
 4281                 SLIST_INIT(&device->periphs);
 4282                 device->generation = 0;
 4283                 device->owner = NULL;
 4284                 device->flags = CAM_DEV_UNCONFIGURED;
 4285                 device->tag_delay_count = 0;
 4286                 device->tag_saved_openings = 0;
 4287                 device->refcount = 1;
 4288                 callout_init_mtx(&device->callout, bus->sim->mtx, 0);
 4289 
 4290                 /*
 4291                  * Hold a reference to our parent target so it
 4292                  * will not go away before we do.
 4293                  */
 4294                 target->refcount++;
 4295 
 4296         }
 4297         return (device);
 4298 }
 4299 
 4300 void
 4301 xpt_acquire_device(struct cam_ed *device)
 4302 {
 4303 
 4304         device->refcount++;
 4305 }
 4306 
 4307 void
 4308 xpt_release_device(struct cam_ed *device)
 4309 {
 4310 
 4311         if (--device->refcount == 0) {
 4312                 struct cam_devq *devq;
 4313 
 4314                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 4315                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 4316                         panic("Removing device while still queued for ccbs");
 4317 
 4318                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 4319                                 callout_stop(&device->callout);
 4320 
 4321                 TAILQ_REMOVE(&device->target->ed_entries, device,links);
 4322                 device->target->generation++;
 4323                 device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings;
 4324                 /* Release our slot in the devq */
 4325                 devq = device->target->bus->sim->devq;
 4326                 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 4327                 camq_fini(&device->drvq);
 4328                 cam_ccbq_fini(&device->ccbq);
 4329                 xpt_release_target(device->target);
 4330                 free(device, M_CAMXPT);
 4331         }
 4332 }
 4333 
 4334 u_int32_t
 4335 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 4336 {
 4337         int     diff;
 4338         int     result;
 4339         struct  cam_ed *dev;
 4340 
 4341         dev = path->device;
 4342 
 4343         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 4344         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 4345         if (result == CAM_REQ_CMP && (diff < 0)) {
 4346                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 4347         }
 4348         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4349          || (dev->inq_flags & SID_CmdQue) != 0)
 4350                 dev->tag_saved_openings = newopenings;
 4351         /* Adjust the global limit */
 4352         dev->sim->max_ccbs += diff;
 4353         return (result);
 4354 }
 4355 
 4356 static struct cam_eb *
 4357 xpt_find_bus(path_id_t path_id)
 4358 {
 4359         struct cam_eb *bus;
 4360 
 4361         mtx_lock(&xsoftc.xpt_topo_lock);
 4362         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4363              bus != NULL;
 4364              bus = TAILQ_NEXT(bus, links)) {
 4365                 if (bus->path_id == path_id) {
 4366                         bus->refcount++;
 4367                         break;
 4368                 }
 4369         }
 4370         mtx_unlock(&xsoftc.xpt_topo_lock);
 4371         return (bus);
 4372 }
 4373 
 4374 static struct cam_et *
 4375 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 4376 {
 4377         struct cam_et *target;
 4378 
 4379         for (target = TAILQ_FIRST(&bus->et_entries);
 4380              target != NULL;
 4381              target = TAILQ_NEXT(target, links)) {
 4382                 if (target->target_id == target_id) {
 4383                         target->refcount++;
 4384                         break;
 4385                 }
 4386         }
 4387         return (target);
 4388 }
 4389 
 4390 static struct cam_ed *
 4391 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 4392 {
 4393         struct cam_ed *device;
 4394 
 4395         for (device = TAILQ_FIRST(&target->ed_entries);
 4396              device != NULL;
 4397              device = TAILQ_NEXT(device, links)) {
 4398                 if (device->lun_id == lun_id) {
 4399                         device->refcount++;
 4400                         break;
 4401                 }
 4402         }
 4403         return (device);
 4404 }
 4405 
 4406 void
 4407 xpt_start_tags(struct cam_path *path)
 4408 {
 4409         struct ccb_relsim crs;
 4410         struct cam_ed *device;
 4411         struct cam_sim *sim;
 4412         int    newopenings;
 4413 
 4414         device = path->device;
 4415         sim = path->bus->sim;
 4416         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4417         xpt_freeze_devq(path, /*count*/1);
 4418         device->inq_flags |= SID_CmdQue;
 4419         if (device->tag_saved_openings != 0)
 4420                 newopenings = device->tag_saved_openings;
 4421         else
 4422                 newopenings = min(device->maxtags,
 4423                                   sim->max_tagged_dev_openings);
 4424         xpt_dev_ccbq_resize(path, newopenings);
 4425         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4426         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4427         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4428         crs.openings
 4429             = crs.release_timeout
 4430             = crs.qfrozen_cnt
 4431             = 0;
 4432         xpt_action((union ccb *)&crs);
 4433 }
 4434 
 4435 void
 4436 xpt_stop_tags(struct cam_path *path)
 4437 {
 4438         struct ccb_relsim crs;
 4439         struct cam_ed *device;
 4440         struct cam_sim *sim;
 4441 
 4442         device = path->device;
 4443         sim = path->bus->sim;
 4444         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4445         device->tag_delay_count = 0;
 4446         xpt_freeze_devq(path, /*count*/1);
 4447         device->inq_flags &= ~SID_CmdQue;
 4448         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
 4449         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4450         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4451         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4452         crs.openings
 4453             = crs.release_timeout
 4454             = crs.qfrozen_cnt
 4455             = 0;
 4456         xpt_action((union ccb *)&crs);
 4457 }
 4458 
 4459 static void
 4460 xpt_boot_delay(void *arg)
 4461 {
 4462 
 4463         xpt_release_boot();
 4464 }
 4465 
 4466 static void
 4467 xpt_config(void *arg)
 4468 {
 4469         /*
 4470          * Now that interrupts are enabled, go find our devices
 4471          */
 4472 
 4473 #ifdef CAMDEBUG
 4474         /* Setup debugging flags and path */
 4475 #ifdef CAM_DEBUG_FLAGS
 4476         cam_dflags = CAM_DEBUG_FLAGS;
 4477 #else /* !CAM_DEBUG_FLAGS */
 4478         cam_dflags = CAM_DEBUG_NONE;
 4479 #endif /* CAM_DEBUG_FLAGS */
 4480 #ifdef CAM_DEBUG_BUS
 4481         if (cam_dflags != CAM_DEBUG_NONE) {
 4482                 /*
 4483                  * Locking is specifically omitted here.  No SIMs have
 4484                  * registered yet, so xpt_create_path will only be searching
 4485                  * empty lists of targets and devices.
 4486                  */
 4487                 if (xpt_create_path(&cam_dpath, xpt_periph,
 4488                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 4489                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 4490                         printf("xpt_config: xpt_create_path() failed for debug"
 4491                                " target %d:%d:%d, debugging disabled\n",
 4492                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 4493                         cam_dflags = CAM_DEBUG_NONE;
 4494                 }
 4495         } else
 4496                 cam_dpath = NULL;
 4497 #else /* !CAM_DEBUG_BUS */
 4498         cam_dpath = NULL;
 4499 #endif /* CAM_DEBUG_BUS */
 4500 #endif /* CAMDEBUG */
 4501 
 4502         periphdriver_init(1);
 4503         xpt_hold_boot();
 4504         callout_init(&xsoftc.boot_callout, 1);
 4505         callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
 4506             xpt_boot_delay, NULL);
 4507         /* Fire up rescan thread. */
 4508         if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
 4509                 printf("xpt_init: failed to create rescan thread\n");
 4510         }
 4511 }
 4512 
 4513 void
 4514 xpt_hold_boot(void)
 4515 {
 4516         xpt_lock_buses();
 4517         xsoftc.buses_to_config++;
 4518         xpt_unlock_buses();
 4519 }
 4520 
 4521 void
 4522 xpt_release_boot(void)
 4523 {
 4524         xpt_lock_buses();
 4525         xsoftc.buses_to_config--;
 4526         if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
 4527                 struct  xpt_task *task;
 4528 
 4529                 xsoftc.buses_config_done = 1;
 4530                 xpt_unlock_buses();
 4531                 /* Call manually because we don't have any busses */
 4532                 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
 4533                 if (task != NULL) {
 4534                         TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
 4535                         taskqueue_enqueue(taskqueue_thread, &task->task);
 4536                 }
 4537         } else
 4538                 xpt_unlock_buses();
 4539 }
 4540 
 4541 /*
 4542  * If the given device only has one peripheral attached to it, and if that
 4543  * peripheral is the passthrough driver, announce it.  This insures that the
 4544  * user sees some sort of announcement for every peripheral in their system.
 4545  */
 4546 static int
 4547 xptpassannouncefunc(struct cam_ed *device, void *arg)
 4548 {
 4549         struct cam_periph *periph;
 4550         int i;
 4551 
 4552         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 4553              periph = SLIST_NEXT(periph, periph_links), i++);
 4554 
 4555         periph = SLIST_FIRST(&device->periphs);
 4556         if ((i == 1)
 4557          && (strncmp(periph->periph_name, "pass", 4) == 0))
 4558                 xpt_announce_periph(periph, NULL);
 4559 
 4560         return(1);
 4561 }
 4562 
 4563 static void
 4564 xpt_finishconfig_task(void *context, int pending)
 4565 {
 4566 
 4567         periphdriver_init(2);
 4568         /*
 4569          * Check for devices with no "standard" peripheral driver
 4570          * attached.  For any devices like that, announce the
 4571          * passthrough driver so the user will see something.
 4572          */
 4573         xpt_for_all_devices(xptpassannouncefunc, NULL);
 4574 
 4575         /* Release our hook so that the boot can continue. */
 4576         config_intrhook_disestablish(xsoftc.xpt_config_hook);
 4577         free(xsoftc.xpt_config_hook, M_CAMXPT);
 4578         xsoftc.xpt_config_hook = NULL;
 4579 
 4580         free(context, M_CAMXPT);
 4581 }
 4582 
 4583 cam_status
 4584 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
 4585                    struct cam_path *path)
 4586 {
 4587         struct ccb_setasync csa;
 4588         cam_status status;
 4589         int xptpath = 0;
 4590 
 4591         if (path == NULL) {
 4592                 mtx_lock(&xsoftc.xpt_lock);
 4593                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
 4594                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4595                 if (status != CAM_REQ_CMP) {
 4596                         mtx_unlock(&xsoftc.xpt_lock);
 4597                         return (status);
 4598                 }
 4599                 xptpath = 1;
 4600         }
 4601 
 4602         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 4603         csa.ccb_h.func_code = XPT_SASYNC_CB;
 4604         csa.event_enable = event;
 4605         csa.callback = cbfunc;
 4606         csa.callback_arg = cbarg;
 4607         xpt_action((union ccb *)&csa);
 4608         status = csa.ccb_h.status;
 4609         if (xptpath) {
 4610                 xpt_free_path(path);
 4611                 mtx_unlock(&xsoftc.xpt_lock);
 4612 
 4613                 if ((status == CAM_REQ_CMP) &&
 4614                     (csa.event_enable & AC_FOUND_DEVICE)) {
 4615                         /*
 4616                          * Get this peripheral up to date with all
 4617                          * the currently existing devices.
 4618                          */
 4619                         xpt_for_all_devices(xptsetasyncfunc, &csa);
 4620                 }
 4621                 if ((status == CAM_REQ_CMP) &&
 4622                     (csa.event_enable & AC_PATH_REGISTERED)) {
 4623                         /*
 4624                          * Get this peripheral up to date with all
 4625                          * the currently existing busses.
 4626                          */
 4627                         xpt_for_all_busses(xptsetasyncbusfunc, &csa);
 4628                 }
 4629         }
 4630         return (status);
 4631 }
 4632 
 4633 static void
 4634 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 4635 {
 4636         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 4637 
 4638         switch (work_ccb->ccb_h.func_code) {
 4639         /* Common cases first */
 4640         case XPT_PATH_INQ:              /* Path routing inquiry */
 4641         {
 4642                 struct ccb_pathinq *cpi;
 4643 
 4644                 cpi = &work_ccb->cpi;
 4645                 cpi->version_num = 1; /* XXX??? */
 4646                 cpi->hba_inquiry = 0;
 4647                 cpi->target_sprt = 0;
 4648                 cpi->hba_misc = 0;
 4649                 cpi->hba_eng_cnt = 0;
 4650                 cpi->max_target = 0;
 4651                 cpi->max_lun = 0;
 4652                 cpi->initiator_id = 0;
 4653                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 4654                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
 4655                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 4656                 cpi->unit_number = sim->unit_number;
 4657                 cpi->bus_id = sim->bus_id;
 4658                 cpi->base_transfer_speed = 0;
 4659                 cpi->protocol = PROTO_UNSPECIFIED;
 4660                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
 4661                 cpi->transport = XPORT_UNSPECIFIED;
 4662                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
 4663                 cpi->ccb_h.status = CAM_REQ_CMP;
 4664                 xpt_done(work_ccb);
 4665                 break;
 4666         }
 4667         default:
 4668                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 4669                 xpt_done(work_ccb);
 4670                 break;
 4671         }
 4672 }
 4673 
 4674 /*
 4675  * The xpt as a "controller" has no interrupt sources, so polling
 4676  * is a no-op.
 4677  */
 4678 static void
 4679 xptpoll(struct cam_sim *sim)
 4680 {
 4681 }
 4682 
 4683 void
 4684 xpt_lock_buses(void)
 4685 {
 4686         mtx_lock(&xsoftc.xpt_topo_lock);
 4687 }
 4688 
 4689 void
 4690 xpt_unlock_buses(void)
 4691 {
 4692         mtx_unlock(&xsoftc.xpt_topo_lock);
 4693 }
 4694 
 4695 static void
 4696 camisr(void *dummy)
 4697 {
 4698         cam_simq_t queue;
 4699         struct cam_sim *sim;
 4700 
 4701         mtx_lock(&cam_simq_lock);
 4702         TAILQ_INIT(&queue);
 4703         while (!TAILQ_EMPTY(&cam_simq)) {
 4704                 TAILQ_CONCAT(&queue, &cam_simq, links);
 4705                 mtx_unlock(&cam_simq_lock);
 4706 
 4707                 while ((sim = TAILQ_FIRST(&queue)) != NULL) {
 4708                         TAILQ_REMOVE(&queue, sim, links);
 4709                         CAM_SIM_LOCK(sim);
 4710                         sim->flags &= ~CAM_SIM_ON_DONEQ;
 4711                         camisr_runqueue(&sim->sim_doneq);
 4712                         CAM_SIM_UNLOCK(sim);
 4713                 }
 4714                 mtx_lock(&cam_simq_lock);
 4715         }
 4716         mtx_unlock(&cam_simq_lock);
 4717 }
 4718 
 4719 static void
 4720 camisr_runqueue(void *V_queue)
 4721 {
 4722         cam_isrq_t *queue = V_queue;
 4723         struct  ccb_hdr *ccb_h;
 4724 
 4725         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
 4726                 int     runq;
 4727 
 4728                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
 4729                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 4730 
 4731                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
 4732                           ("camisr\n"));
 4733 
 4734                 runq = FALSE;
 4735 
 4736                 if (ccb_h->flags & CAM_HIGH_POWER) {
 4737                         struct highpowerlist    *hphead;
 4738                         union ccb               *send_ccb;
 4739 
 4740                         mtx_lock(&xsoftc.xpt_lock);
 4741                         hphead = &xsoftc.highpowerq;
 4742 
 4743                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
 4744 
 4745                         /*
 4746                          * Increment the count since this command is done.
 4747                          */
 4748                         xsoftc.num_highpower++;
 4749 
 4750                         /*
 4751                          * Any high powered commands queued up?
 4752                          */
 4753                         if (send_ccb != NULL) {
 4754 
 4755                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
 4756                                 mtx_unlock(&xsoftc.xpt_lock);
 4757 
 4758                                 xpt_release_devq(send_ccb->ccb_h.path,
 4759                                                  /*count*/1, /*runqueue*/TRUE);
 4760                         } else
 4761                                 mtx_unlock(&xsoftc.xpt_lock);
 4762                 }
 4763 
 4764                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 4765                         struct cam_ed *dev;
 4766 
 4767                         dev = ccb_h->path->device;
 4768 
 4769                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 4770                         ccb_h->path->bus->sim->devq->send_active--;
 4771                         ccb_h->path->bus->sim->devq->send_openings++;
 4772                         runq = TRUE;
 4773 
 4774                         if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 4775                           && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
 4776                          || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 4777                           && (dev->ccbq.dev_active == 0))) {
 4778                                 xpt_release_devq(ccb_h->path, /*count*/1,
 4779                                                  /*run_queue*/FALSE);
 4780                         }
 4781 
 4782                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4783                          && (--dev->tag_delay_count == 0))
 4784                                 xpt_start_tags(ccb_h->path);
 4785                         if (!device_is_send_queued(dev))
 4786                                 xpt_schedule_dev_sendq(ccb_h->path->bus, dev);
 4787                 }
 4788 
 4789                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
 4790                         xpt_release_simq(ccb_h->path->bus->sim,
 4791                                          /*run_queue*/TRUE);
 4792                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
 4793                         runq = FALSE;
 4794                 }
 4795 
 4796                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 4797                  && (ccb_h->status & CAM_DEV_QFRZN)) {
 4798                         xpt_release_devq(ccb_h->path, /*count*/1,
 4799                                          /*run_queue*/TRUE);
 4800                         ccb_h->status &= ~CAM_DEV_QFRZN;
 4801                 } else if (runq) {
 4802                         xpt_run_dev_sendq(ccb_h->path->bus);
 4803                 }
 4804 
 4805                 /* Call the peripheral driver's callback */
 4806                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 4807         }
 4808 }

Cache object: 833c9b7415d1f833e74b0e47140b7f15


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.