FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c


    1 /*-
    2  * Implementation of the Common Access Method Transport (XPT) layer.
    3  *
    4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
    5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/8.0/sys/cam/cam_xpt.c 197318 2009-09-18 20:35:05Z ken $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/systm.h>
   36 #include <sys/types.h>
   37 #include <sys/malloc.h>
   38 #include <sys/kernel.h>
   39 #include <sys/time.h>
   40 #include <sys/conf.h>
   41 #include <sys/fcntl.h>
   42 #include <sys/md5.h>
   43 #include <sys/interrupt.h>
   44 #include <sys/sbuf.h>
   45 #include <sys/taskqueue.h>
   46 
   47 #include <sys/lock.h>
   48 #include <sys/mutex.h>
   49 #include <sys/sysctl.h>
   50 #include <sys/kthread.h>
   51 
   52 #ifdef PC98
   53 #include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
   54 #endif
   55 
   56 #include <cam/cam.h>
   57 #include <cam/cam_ccb.h>
   58 #include <cam/cam_periph.h>
   59 #include <cam/cam_queue.h>
   60 #include <cam/cam_sim.h>
   61 #include <cam/cam_xpt.h>
   62 #include <cam/cam_xpt_sim.h>
   63 #include <cam/cam_xpt_periph.h>
   64 #include <cam/cam_xpt_internal.h>
   65 #include <cam/cam_debug.h>
   66 
   67 #include <cam/scsi/scsi_all.h>
   68 #include <cam/scsi/scsi_message.h>
   69 #include <cam/scsi/scsi_pass.h>
   70 #include <machine/stdarg.h>     /* for xpt_print below */
   71 #include "opt_cam.h"
   72 
   73 /*
   74  * This is the maximum number of high powered commands (e.g. start unit)
   75  * that can be outstanding at a particular time.
   76  */
   77 #ifndef CAM_MAX_HIGHPOWER
   78 #define CAM_MAX_HIGHPOWER  4
   79 #endif
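       /*
        * The default can be overridden from the kernel configuration
        * file, e.g. "options CAM_MAX_HIGHPOWER=8" (delivered through
        * opt_cam.h like the other CAM build options).
        */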
   80 
   81 /* Datastructures internal to the xpt layer */
   82 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
   83 
    84 /* Object for deferring XPT actions to a taskqueue */
   85 struct xpt_task {
   86         struct task     task;
   87         void            *data1;
   88         uintptr_t       data2;
   89 };
   90 
   91 typedef enum {
   92         XPT_FLAG_OPEN           = 0x01
   93 } xpt_flags;
   94 
   95 struct xpt_softc {
   96         xpt_flags               flags;
   97         u_int32_t               xpt_generation;
   98 
   99         /* number of high powered commands that can go through right now */
  100         STAILQ_HEAD(highpowerlist, ccb_hdr)     highpowerq;
  101         int                     num_highpower;
  102 
  103         /* queue for handling async rescan requests. */
  104         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
  105 
  106         /* Registered busses */
  107         TAILQ_HEAD(,cam_eb)     xpt_busses;
  108         u_int                   bus_generation;
  109 
  110         struct intr_config_hook *xpt_config_hook;
  111 
  112         struct mtx              xpt_topo_lock;
  113         struct mtx              xpt_lock;
  114 };
  115 
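       /*
        * Return values for the dev_match routines below: the low
        * nibble (DM_RET_FLAG_MASK) carries flags such as DM_RET_COPY,
        * while the high nibble (DM_RET_ACTION_MASK) carries exactly
        * one traversal action (none/stop/descend/error).
        */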
  116 typedef enum {
  117         DM_RET_COPY             = 0x01,
  118         DM_RET_FLAG_MASK        = 0x0f,
  119         DM_RET_NONE             = 0x00,
  120         DM_RET_STOP             = 0x10,
  121         DM_RET_DESCEND          = 0x20,
  122         DM_RET_ERROR            = 0x30,
  123         DM_RET_ACTION_MASK      = 0xf0
  124 } dev_match_ret;
  125 
  126 typedef enum {
  127         XPT_DEPTH_BUS,
  128         XPT_DEPTH_TARGET,
  129         XPT_DEPTH_DEVICE,
  130         XPT_DEPTH_PERIPH
  131 } xpt_traverse_depth;
  132 
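       /*
        * Argument block for EDT traversals; tr_func is cast to the
        * xpt_*func_t matching "depth" before it is invoked.
        */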
  133 struct xpt_traverse_config {
  134         xpt_traverse_depth      depth;
  135         void                    *tr_func;
  136         void                    *tr_arg;
  137 };
  138 
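       /*
        * Traversal callbacks, one flavor per EDT level plus one for
        * the peripheral driver list.  A callback returns non-zero to
        * continue the traversal and zero to cut it short.
        */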
  139 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
  140 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
  141 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
  142 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
  143 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
  144 
  145 /* Transport layer configuration information */
  146 static struct xpt_softc xsoftc;
  147 
  148 /* Queues for our software interrupt handler */
  149 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
  150 typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
  151 static cam_simq_t cam_simq;
  152 static struct mtx cam_simq_lock;
  153 
   154 /* Pointer to our software interrupt handler */
  155 static void *cambio_ih;
  156 
  157 struct cam_periph *xpt_periph;
  158 
  159 static periph_init_t xpt_periph_init;
  160 
  161 static struct periph_driver xpt_driver =
  162 {
  163         xpt_periph_init, "xpt",
  164         TAILQ_HEAD_INITIALIZER(xpt_driver.units)
  165 };
  166 
  167 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
  168 
  169 static d_open_t xptopen;
  170 static d_close_t xptclose;
  171 static d_ioctl_t xptioctl;
  172 
  173 static struct cdevsw xpt_cdevsw = {
  174         .d_version =    D_VERSION,
  175         .d_flags =      0,
  176         .d_open =       xptopen,
  177         .d_close =      xptclose,
  178         .d_ioctl =      xptioctl,
  179         .d_name =       "xpt",
  180 };
  181 
  182 /* Storage for debugging datastructures */
  183 #ifdef  CAMDEBUG
  184 struct cam_path *cam_dpath;
  185 u_int32_t cam_dflags;
  186 u_int32_t cam_debug_delay;
  187 #endif
  188 
  189 /* Our boot-time initialization hook */
  190 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
  191 
  192 static moduledata_t cam_moduledata = {
  193         "cam",
  194         cam_module_event_handler,
  195         NULL
  196 };
  197 
  198 static int      xpt_init(void *);
  199 
  200 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
  201 MODULE_VERSION(cam, 1);
  202 
  203 
  204 static void             xpt_async_bcast(struct async_list *async_head,
  205                                         u_int32_t async_code,
  206                                         struct cam_path *path,
  207                                         void *async_arg);
  208 static path_id_t xptnextfreepathid(void);
  209 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
  210 static union ccb *xpt_get_ccb(struct cam_ed *device);
  211 static void      xpt_run_dev_allocq(struct cam_eb *bus);
  212 static timeout_t xpt_release_devq_timeout;
  213 static void      xpt_release_simq_timeout(void *arg) __unused;
  214 static void      xpt_release_bus(struct cam_eb *bus);
  215 static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
  216                                          int run_queue);
  217 static struct cam_et*
  218                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
  219 static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
  220 static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
  221                                     struct cam_ed *device);
  222 static struct cam_eb*
  223                  xpt_find_bus(path_id_t path_id);
  224 static struct cam_et*
  225                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
  226 static struct cam_ed*
  227                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
  228 static xpt_busfunc_t    xptconfigbuscountfunc;
  229 static xpt_busfunc_t    xptconfigfunc;
  230 static void      xpt_config(void *arg);
  231 static xpt_devicefunc_t xptpassannouncefunc;
  232 static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
  233 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
  234 static void      xptpoll(struct cam_sim *sim);
  235 static void      camisr(void *);
  236 static void      camisr_runqueue(void *);
  237 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
  238                                     u_int num_patterns, struct cam_eb *bus);
  239 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
  240                                        u_int num_patterns,
  241                                        struct cam_ed *device);
  242 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
  243                                        u_int num_patterns,
  244                                        struct cam_periph *periph);
  245 static xpt_busfunc_t    xptedtbusfunc;
  246 static xpt_targetfunc_t xptedttargetfunc;
  247 static xpt_devicefunc_t xptedtdevicefunc;
  248 static xpt_periphfunc_t xptedtperiphfunc;
  249 static xpt_pdrvfunc_t   xptplistpdrvfunc;
  250 static xpt_periphfunc_t xptplistperiphfunc;
  251 static int              xptedtmatch(struct ccb_dev_match *cdm);
  252 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
  253 static int              xptbustraverse(struct cam_eb *start_bus,
  254                                        xpt_busfunc_t *tr_func, void *arg);
  255 static int              xpttargettraverse(struct cam_eb *bus,
  256                                           struct cam_et *start_target,
  257                                           xpt_targetfunc_t *tr_func, void *arg);
  258 static int              xptdevicetraverse(struct cam_et *target,
  259                                           struct cam_ed *start_device,
  260                                           xpt_devicefunc_t *tr_func, void *arg);
  261 static int              xptperiphtraverse(struct cam_ed *device,
  262                                           struct cam_periph *start_periph,
  263                                           xpt_periphfunc_t *tr_func, void *arg);
  264 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
  265                                         xpt_pdrvfunc_t *tr_func, void *arg);
  266 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
  267                                             struct cam_periph *start_periph,
  268                                             xpt_periphfunc_t *tr_func,
  269                                             void *arg);
  270 static xpt_busfunc_t    xptdefbusfunc;
  271 static xpt_targetfunc_t xptdeftargetfunc;
  272 static xpt_devicefunc_t xptdefdevicefunc;
  273 static xpt_periphfunc_t xptdefperiphfunc;
  274 static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
  275 static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
  276                                             void *arg);
  277 static void             xpt_dev_async_default(u_int32_t async_code,
  278                                               struct cam_eb *bus,
  279                                               struct cam_et *target,
  280                                               struct cam_ed *device,
  281                                               void *async_arg);
  282 static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
  283                                                  struct cam_et *target,
  284                                                  lun_id_t lun_id);
  285 static xpt_devicefunc_t xptsetasyncfunc;
  286 static xpt_busfunc_t    xptsetasyncbusfunc;
  287 static cam_status       xptregister(struct cam_periph *periph,
  288                                     void *arg);
  289 static void      xpt_start_tags(struct cam_path *path);
  290 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
  291                                             struct cam_ed *dev);
  292 static __inline int periph_is_queued(struct cam_periph *periph);
  293 static __inline int device_is_alloc_queued(struct cam_ed *device);
  294 static __inline int device_is_send_queued(struct cam_ed *device);
  295 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
  296 
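       /*
        * Put a device on its bus' CCB-allocation queue if it still has
        * devq openings, applying any deferred ccbq resize first.  The
        * entry is queued at the priority of the highest-priority
        * peripheral driver waiting in the device's drvq.  Returns the
        * xpt_schedule_dev() result, or zero if there are no openings.
        */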
  297 static __inline int
  298 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
  299 {
  300         int retval;
  301 
  302         if (dev->ccbq.devq_openings > 0) {
  303                 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
  304                         cam_ccbq_resize(&dev->ccbq,
  305                                         dev->ccbq.dev_openings
  306                                         + dev->ccbq.dev_active);
  307                         dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
  308                 }
  309                 /*
  310                  * The priority of a device waiting for CCB resources
   311          * is that of the highest priority peripheral driver
  312                  * enqueued.
  313                  */
  314                 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
  315                                           &dev->alloc_ccb_entry.pinfo,
  316                                           CAMQ_GET_HEAD(&dev->drvq)->priority);
  317         } else {
  318                 retval = 0;
  319         }
  320 
  321         return (retval);
  322 }
  323 
  324 static __inline int
  325 periph_is_queued(struct cam_periph *periph)
  326 {
  327         return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
  328 }
  329 
  330 static __inline int
  331 device_is_alloc_queued(struct cam_ed *device)
  332 {
  333         return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  334 }
  335 
  336 static __inline int
  337 device_is_send_queued(struct cam_ed *device)
  338 {
  339         return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  340 }
  341 
  342 static __inline int
  343 dev_allocq_is_runnable(struct cam_devq *devq)
  344 {
  345         /*
  346          * Have work to do.
  347          * Have space to do more work.
  348          * Allowed to do work.
  349          */
  350         return ((devq->alloc_queue.qfrozen_cnt == 0)
  351              && (devq->alloc_queue.entries > 0)
  352              && (devq->alloc_openings > 0));
  353 }
  354 
  355 static void
   356 xpt_periph_init(void)
  357 {
  358         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
  359 }
  360 
  361 static void
  362 xptdone(struct cam_periph *periph, union ccb *done_ccb)
  363 {
  364         /* Caller will release the CCB */
  365         wakeup(&done_ccb->ccb_h.cbfcnp);
  366 }
  367 
  368 static int
  369 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
  370 {
  371 
  372         /*
  373          * Only allow read-write access.
  374          */
  375         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
  376                 return(EPERM);
  377 
  378         /*
  379          * We don't allow nonblocking access.
  380          */
  381         if ((flags & O_NONBLOCK) != 0) {
  382                 printf("%s: can't do nonblocking access\n", devtoname(dev));
  383                 return(ENODEV);
  384         }
  385 
  386         /* Mark ourselves open */
  387         mtx_lock(&xsoftc.xpt_lock);
  388         xsoftc.flags |= XPT_FLAG_OPEN;
  389         mtx_unlock(&xsoftc.xpt_lock);
  390 
  391         return(0);
  392 }
  393 
  394 static int
  395 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
  396 {
  397 
  398         /* Mark ourselves closed */
  399         mtx_lock(&xsoftc.xpt_lock);
  400         xsoftc.flags &= ~XPT_FLAG_OPEN;
  401         mtx_unlock(&xsoftc.xpt_lock);
  402 
  403         return(0);
  404 }
  405 
  406 /*
  407  * Don't automatically grab the xpt softc lock here even though this is going
  408  * through the xpt device.  The xpt device is really just a back door for
  409  * accessing other devices and SIMs, so the right thing to do is to grab
  410  * the appropriate SIM lock once the bus/SIM is located.
  411  */
  412 static int
  413 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
  414 {
  415         int error;
  416 
  417         error = 0;
  418 
  419         switch(cmd) {
  420         /*
  421          * For the transport layer CAMIOCOMMAND ioctl, we really only want
  422          * to accept CCB types that don't quite make sense to send through a
  423          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
  424          * in the CAM spec.
  425          */
  426         case CAMIOCOMMAND: {
  427                 union ccb *ccb;
  428                 union ccb *inccb;
  429                 struct cam_eb *bus;
  430 
  431                 inccb = (union ccb *)addr;
  432 
  433                 bus = xpt_find_bus(inccb->ccb_h.path_id);
  434                 if (bus == NULL) {
  435                         error = EINVAL;
  436                         break;
  437                 }
  438 
  439                 switch(inccb->ccb_h.func_code) {
  440                 case XPT_SCAN_BUS:
  441                 case XPT_RESET_BUS:
  442                         if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
  443                          || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
  444                                 error = EINVAL;
  445                                 break;
  446                         }
  447                         /* FALLTHROUGH */
  448                 case XPT_PATH_INQ:
  449                 case XPT_ENG_INQ:
  450                 case XPT_SCAN_LUN:
  451 
  452                         ccb = xpt_alloc_ccb();
  453 
  454                         CAM_SIM_LOCK(bus->sim);
  455 
  456                         /*
  457                          * Create a path using the bus, target, and lun the
  458                          * user passed in.
  459                          */
  460                         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
  461                                             inccb->ccb_h.path_id,
  462                                             inccb->ccb_h.target_id,
  463                                             inccb->ccb_h.target_lun) !=
  464                                             CAM_REQ_CMP){
  465                                 error = EINVAL;
  466                                 CAM_SIM_UNLOCK(bus->sim);
  467                                 xpt_free_ccb(ccb);
  468                                 break;
  469                         }
  470                         /* Ensure all of our fields are correct */
  471                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
  472                                       inccb->ccb_h.pinfo.priority);
  473                         xpt_merge_ccb(ccb, inccb);
  474                         ccb->ccb_h.cbfcnp = xptdone;
  475                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
  476                         bcopy(ccb, inccb, sizeof(union ccb));
  477                         xpt_free_path(ccb->ccb_h.path);
  478                         xpt_free_ccb(ccb);
  479                         CAM_SIM_UNLOCK(bus->sim);
  480                         break;
  481 
  482                 case XPT_DEBUG: {
  483                         union ccb ccb;
  484 
  485                         /*
  486                          * This is an immediate CCB, so it's okay to
  487                          * allocate it on the stack.
  488                          */
  489 
  490                         CAM_SIM_LOCK(bus->sim);
  491 
  492                         /*
  493                          * Create a path using the bus, target, and lun the
  494                          * user passed in.
  495                          */
  496                         if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
  497                                             inccb->ccb_h.path_id,
  498                                             inccb->ccb_h.target_id,
  499                                             inccb->ccb_h.target_lun) !=
  500                                             CAM_REQ_CMP){
  501                                 error = EINVAL;
  502                                 CAM_SIM_UNLOCK(bus->sim);
  503                                 break;
  504                         }
  505                         /* Ensure all of our fields are correct */
  506                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
  507                                       inccb->ccb_h.pinfo.priority);
  508                         xpt_merge_ccb(&ccb, inccb);
  509                         ccb.ccb_h.cbfcnp = xptdone;
  510                         xpt_action(&ccb);
  511                         CAM_SIM_UNLOCK(bus->sim);
  512                         bcopy(&ccb, inccb, sizeof(union ccb));
  513                         xpt_free_path(ccb.ccb_h.path);
  514                         break;
  515 
  516                 }
  517                 case XPT_DEV_MATCH: {
  518                         struct cam_periph_map_info mapinfo;
  519                         struct cam_path *old_path;
  520 
  521                         /*
  522                          * We can't deal with physical addresses for this
  523                          * type of transaction.
  524                          */
  525                         if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
  526                                 error = EINVAL;
  527                                 break;
  528                         }
  529 
  530                         /*
  531                          * Save this in case the caller had it set to
  532                          * something in particular.
  533                          */
  534                         old_path = inccb->ccb_h.path;
  535 
  536                         /*
  537                          * We really don't need a path for the matching
  538                          * code.  The path is needed because of the
  539                          * debugging statements in xpt_action().  They
  540                          * assume that the CCB has a valid path.
  541                          */
  542                         inccb->ccb_h.path = xpt_periph->path;
  543 
  544                         bzero(&mapinfo, sizeof(mapinfo));
  545 
  546                         /*
  547                          * Map the pattern and match buffers into kernel
  548                          * virtual address space.
  549                          */
  550                         error = cam_periph_mapmem(inccb, &mapinfo);
  551 
  552                         if (error) {
  553                                 inccb->ccb_h.path = old_path;
  554                                 break;
  555                         }
  556 
  557                         /*
  558                          * This is an immediate CCB, we can send it on directly.
  559                          */
  560                         xpt_action(inccb);
  561 
  562                         /*
  563                          * Map the buffers back into user space.
  564                          */
  565                         cam_periph_unmapmem(inccb, &mapinfo);
  566 
  567                         inccb->ccb_h.path = old_path;
  568 
  569                         error = 0;
  570                         break;
  571                 }
  572                 default:
  573                         error = ENOTSUP;
  574                         break;
  575                 }
  576                 xpt_release_bus(bus);
  577                 break;
  578         }
   579         /*
   580          * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb
   581          * as input, with the peripheral driver name and unit number
   582          * filled in; the other fields don't really matter as input.
   583          * The passthrough driver name ("pass") and unit number are
   584          * passed back in the ccb, along with the current device
   585          * generation number, the index into the device's peripheral
   586          * driver list, and the status.  Note that since we do
   587          * everything in one pass, unlike the XPT_GDEVLIST ccb, we
   588          * never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
   589          * (or rather should be) impossible for the device peripheral
   590          * driver list to change, since we look at the whole thing in
   591          * one pass and we do it with lock protection.
   592          */
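               /*
                * Userland usage sketch (illustrative only; error
                * handling is omitted and "fd"/"ccb" are hypothetical
                * names):
                *
                *      union ccb ccb;
                *      int fd;
                *
                *      fd = open("/dev/xpt0", O_RDWR);
                *      bzero(&ccb, sizeof(ccb));
                *      strlcpy(ccb.cgdl.periph_name, "da",
                *          sizeof(ccb.cgdl.periph_name));
                *      ccb.cgdl.unit_number = 0;
                *      if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
                *          ccb.ccb_h.status == CAM_REQ_CMP)
                *              printf("%s%d\n", ccb.cgdl.periph_name,
                *                  ccb.cgdl.unit_number);
                *
                * which would print, e.g., "pass0".
                */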
  593         case CAMGETPASSTHRU: {
  594                 union ccb *ccb;
  595                 struct cam_periph *periph;
  596                 struct periph_driver **p_drv;
  597                 char   *name;
  598                 u_int unit;
  599                 u_int cur_generation;
  600                 int base_periph_found;
  601                 int splbreaknum;
  602 
  603                 ccb = (union ccb *)addr;
  604                 unit = ccb->cgdl.unit_number;
  605                 name = ccb->cgdl.periph_name;
  606                 /*
  607                  * Every 100 devices, we want to drop our lock protection to
  608                  * give the software interrupt handler a chance to run.
  609                  * Most systems won't run into this check, but this should
  610                  * avoid starvation in the software interrupt handler in
  611                  * large systems.
  612                  */
  613                 splbreaknum = 100;
  614 
  615                 ccb = (union ccb *)addr;
  616 
  617                 base_periph_found = 0;
  618 
  619                 /*
  620                  * Sanity check -- make sure we don't get a null peripheral
  621                  * driver name.
  622                  */
  623                 if (*ccb->cgdl.periph_name == '\0') {
  624                         error = EINVAL;
  625                         break;
  626                 }
  627 
  628                 /* Keep the list from changing while we traverse it */
  629                 mtx_lock(&xsoftc.xpt_topo_lock);
  630 ptstartover:
  631                 cur_generation = xsoftc.xpt_generation;
  632 
  633                 /* first find our driver in the list of drivers */
  634                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
  635                         if (strcmp((*p_drv)->driver_name, name) == 0)
  636                                 break;
  637 
  638                 if (*p_drv == NULL) {
  639                         mtx_unlock(&xsoftc.xpt_topo_lock);
  640                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  641                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  642                         *ccb->cgdl.periph_name = '\0';
  643                         ccb->cgdl.unit_number = 0;
  644                         error = ENOENT;
  645                         break;
  646                 }
  647 
  648                 /*
  649                  * Run through every peripheral instance of this driver
  650                  * and check to see whether it matches the unit passed
  651                  * in by the user.  If it does, get out of the loops and
  652                  * find the passthrough driver associated with that
  653                  * peripheral driver.
  654                  */
  655                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
  656                      periph = TAILQ_NEXT(periph, unit_links)) {
  657 
  658                         if (periph->unit_number == unit) {
  659                                 break;
  660                         } else if (--splbreaknum == 0) {
  661                                 mtx_unlock(&xsoftc.xpt_topo_lock);
  662                                 mtx_lock(&xsoftc.xpt_topo_lock);
  663                                 splbreaknum = 100;
  664                                 if (cur_generation != xsoftc.xpt_generation)
  665                                        goto ptstartover;
  666                         }
  667                 }
  668                 /*
  669                  * If we found the peripheral driver that the user passed
  670                  * in, go through all of the peripheral drivers for that
  671                  * particular device and look for a passthrough driver.
  672                  */
  673                 if (periph != NULL) {
  674                         struct cam_ed *device;
  675                         int i;
  676 
  677                         base_periph_found = 1;
  678                         device = periph->path->device;
  679                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
  680                              periph != NULL;
  681                              periph = SLIST_NEXT(periph, periph_links), i++) {
  682                                 /*
  683                                  * Check to see whether we have a
  684                                  * passthrough device or not.
  685                                  */
  686                                 if (strcmp(periph->periph_name, "pass") == 0) {
  687                                         /*
  688                                          * Fill in the getdevlist fields.
  689                                          */
  690                                         strcpy(ccb->cgdl.periph_name,
  691                                                periph->periph_name);
  692                                         ccb->cgdl.unit_number =
  693                                                 periph->unit_number;
  694                                         if (SLIST_NEXT(periph, periph_links))
  695                                                 ccb->cgdl.status =
  696                                                         CAM_GDEVLIST_MORE_DEVS;
  697                                         else
  698                                                 ccb->cgdl.status =
  699                                                        CAM_GDEVLIST_LAST_DEVICE;
  700                                         ccb->cgdl.generation =
  701                                                 device->generation;
  702                                         ccb->cgdl.index = i;
  703                                         /*
  704                                          * Fill in some CCB header fields
  705                                          * that the user may want.
  706                                          */
  707                                         ccb->ccb_h.path_id =
  708                                                 periph->path->bus->path_id;
  709                                         ccb->ccb_h.target_id =
  710                                                 periph->path->target->target_id;
  711                                         ccb->ccb_h.target_lun =
  712                                                 periph->path->device->lun_id;
  713                                         ccb->ccb_h.status = CAM_REQ_CMP;
  714                                         break;
  715                                 }
  716                         }
  717                 }
  718 
  719                 /*
  720                  * If the periph is null here, one of two things has
  721                  * happened.  The first possibility is that we couldn't
  722                  * find the unit number of the particular peripheral driver
  723                  * that the user is asking about.  e.g. the user asks for
  724                  * the passthrough driver for "da11".  We find the list of
  725                  * "da" peripherals all right, but there is no unit 11.
  726                  * The other possibility is that we went through the list
  727                  * of peripheral drivers attached to the device structure,
  728                  * but didn't find one with the name "pass".  Either way,
  729                  * we return ENOENT, since we couldn't find something.
  730                  */
  731                 if (periph == NULL) {
  732                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  733                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  734                         *ccb->cgdl.periph_name = '\0';
  735                         ccb->cgdl.unit_number = 0;
  736                         error = ENOENT;
  737                         /*
  738                          * It is unfortunate that this is even necessary,
  739                          * but there are many, many clueless users out there.
  740                          * If this is true, the user is looking for the
  741                          * passthrough driver, but doesn't have one in his
  742                          * kernel.
  743                          */
  744                         if (base_periph_found == 1) {
  745                                 printf("xptioctl: pass driver is not in the "
  746                                        "kernel\n");
  747                                 printf("xptioctl: put \"device pass\" in "
  748                                        "your kernel config file\n");
  749                         }
  750                 }
  751                 mtx_unlock(&xsoftc.xpt_topo_lock);
  752                 break;
  753                 }
  754         default:
  755                 error = ENOTTY;
  756                 break;
  757         }
  758 
  759         return(error);
  760 }
  761 
  762 static int
  763 cam_module_event_handler(module_t mod, int what, void *arg)
  764 {
  765         int error;
  766 
  767         switch (what) {
  768         case MOD_LOAD:
  769                 if ((error = xpt_init(NULL)) != 0)
  770                         return (error);
  771                 break;
  772         case MOD_UNLOAD:
  773                 return EBUSY;
  774         default:
  775                 return EOPNOTSUPP;
  776         }
  777 
  778         return 0;
  779 }
  780 
  781 /* thread to handle bus rescans */
  782 static void
  783 xpt_scanner_thread(void *dummy)
  784 {
  785         cam_isrq_t      queue;
  786         union ccb       *ccb;
  787         struct cam_sim  *sim;
  788 
  789         for (;;) {
  790                 /*
  791                  * Wait for a rescan request to come in.  When it does, splice
  792                  * it onto a queue from local storage so that the xpt lock
  793                  * doesn't need to be held while the requests are being
  794                  * processed.
  795                  */
  796                 xpt_lock_buses();
  797                 if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
  798                         msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
  799                                "ccb_scanq", 0);
  800                 TAILQ_INIT(&queue);
  801                 TAILQ_CONCAT(&queue, &xsoftc.ccb_scanq, sim_links.tqe);
  802                 xpt_unlock_buses();
  803 
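                       /*
                        * Run each queued request: a wildcard target id
                        * means a full bus scan, anything else a single
                        * LUN scan.  The request's path and CCB are
                        * freed once the scan completes.
                        */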
  804                 while ((ccb = (union ccb *)TAILQ_FIRST(&queue)) != NULL) {
  805                         TAILQ_REMOVE(&queue, &ccb->ccb_h, sim_links.tqe);
  806 
  807                         sim = ccb->ccb_h.path->bus->sim;
  808                         CAM_SIM_LOCK(sim);
  809 
   810                         if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD)
  811                                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
  812                         else
  813                                 ccb->ccb_h.func_code = XPT_SCAN_LUN;
  814                         ccb->ccb_h.cbfcnp = xptdone;
  815                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 1);
  816                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
  817                         xpt_free_path(ccb->ccb_h.path);
  818                         xpt_free_ccb(ccb);
  819                         CAM_SIM_UNLOCK(sim);
  820                 }
  821         }
  822 }
  823 
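       /*
        * Queue a rescan request for the scanner thread above.  The CCB
        * and its path are consumed: freed immediately if an identical
        * request is already queued, otherwise after the scan completes.
        */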
  824 void
  825 xpt_rescan(union ccb *ccb)
  826 {
  827         struct ccb_hdr *hdr;
  828 
  829         /*
  830          * Don't make duplicate entries for the same paths.
  831          */
  832         xpt_lock_buses();
  833         TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
  834                 if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
  835                         wakeup(&xsoftc.ccb_scanq);
  836                         xpt_unlock_buses();
  837                         xpt_print(ccb->ccb_h.path, "rescan already queued\n");
  838                         xpt_free_path(ccb->ccb_h.path);
  839                         xpt_free_ccb(ccb);
  840                         return;
  841                 }
  842         }
  843         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  844         wakeup(&xsoftc.ccb_scanq);
  845         xpt_unlock_buses();
  846 }
  847 
  848 /* Functions accessed by the peripheral drivers */
  849 static int
  850 xpt_init(void *dummy)
  851 {
  852         struct cam_sim *xpt_sim;
  853         struct cam_path *path;
  854         struct cam_devq *devq;
  855         cam_status status;
  856 
  857         TAILQ_INIT(&xsoftc.xpt_busses);
  858         TAILQ_INIT(&cam_simq);
  859         TAILQ_INIT(&xsoftc.ccb_scanq);
  860         STAILQ_INIT(&xsoftc.highpowerq);
  861         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
  862 
  863         mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
  864         mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
  865         mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
  866 
  867         /*
   868          * The xpt layer is, itself, the equivalent of a SIM.
  869          * Allow 16 ccbs in the ccb pool for it.  This should
  870          * give decent parallelism when we probe busses and
  871          * perform other XPT functions.
  872          */
  873         devq = cam_simq_alloc(16);
  874         xpt_sim = cam_sim_alloc(xptaction,
  875                                 xptpoll,
  876                                 "xpt",
  877                                 /*softc*/NULL,
  878                                 /*unit*/0,
  879                                 /*mtx*/&xsoftc.xpt_lock,
  880                                 /*max_dev_transactions*/0,
  881                                 /*max_tagged_dev_transactions*/0,
  882                                 devq);
  883         if (xpt_sim == NULL)
  884                 return (ENOMEM);
  885 
  886         xpt_sim->max_ccbs = 16;
  887 
  888         mtx_lock(&xsoftc.xpt_lock);
   889         if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
   890                 mtx_unlock(&xsoftc.xpt_lock);
   891                 printf("xpt_init: xpt_bus_register failed with status %#x, failing attach\n", status);
   892                 return (EINVAL);
  893         }
  894 
  895         /*
  896          * Looking at the XPT from the SIM layer, the XPT is
   897          * the equivalent of a peripheral driver.  Allocate
  898          * a peripheral driver entry for us.
  899          */
   900         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
   901                                       CAM_TARGET_WILDCARD,
   902                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
   903                 mtx_unlock(&xsoftc.xpt_lock);
   904                 printf("xpt_init: xpt_create_path failed with status %#x, failing attach\n", status);
   905                 return (EINVAL);
   906         }
  907 
  908         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
  909                          path, NULL, 0, xpt_sim);
  910         xpt_free_path(path);
  911         mtx_unlock(&xsoftc.xpt_lock);
  912 
  913         /*
  914          * Register a callback for when interrupts are enabled.
  915          */
  916         xsoftc.xpt_config_hook =
  917             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
  918                                               M_CAMXPT, M_NOWAIT | M_ZERO);
  919         if (xsoftc.xpt_config_hook == NULL) {
  920                 printf("xpt_init: Cannot malloc config hook "
  921                        "- failing attach\n");
  922                 return (ENOMEM);
  923         }
  924 
  925         xsoftc.xpt_config_hook->ich_func = xpt_config;
   926         if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
   927                 free(xsoftc.xpt_config_hook, M_CAMXPT);
   928                 printf("xpt_init: config_intrhook_establish failed - failing attach\n");
   929                 return (ENOMEM);
   930         }
  931 
  932         /* fire up rescan thread */
  933         if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
  934                 printf("xpt_init: failed to create rescan thread\n");
  935         }
  936         /* Install our software interrupt handlers */
  937         swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
  938 
  939         return (0);
  940 }
  941 
  942 static cam_status
  943 xptregister(struct cam_periph *periph, void *arg)
  944 {
  945         struct cam_sim *xpt_sim;
  946 
  947         if (periph == NULL) {
  948                 printf("xptregister: periph was NULL!!\n");
  949                 return(CAM_REQ_CMP_ERR);
  950         }
  951 
  952         xpt_sim = (struct cam_sim *)arg;
  953         xpt_sim->softc = periph;
  954         xpt_periph = periph;
  955         periph->softc = NULL;
  956 
  957         return(CAM_REQ_CMP);
  958 }
  959 
  960 int32_t
  961 xpt_add_periph(struct cam_periph *periph)
  962 {
  963         struct cam_ed *device;
  964         int32_t  status;
  965         struct periph_list *periph_head;
  966 
  967         mtx_assert(periph->sim->mtx, MA_OWNED);
  968 
  969         device = periph->path->device;
  970 
   971         status = CAM_REQ_CMP;
   972 
   973         if (device != NULL) {
   974                 periph_head = &device->periphs;
   975 
  976                 /*
  977                  * Make room for this peripheral
  978                  * so it will fit in the queue
  979                  * when it's scheduled to run
  980                  */
  981                 status = camq_resize(&device->drvq,
  982                                      device->drvq.array_size + 1);
  983 
  984                 device->generation++;
  985 
  986                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
  987         }
  988 
  989         mtx_lock(&xsoftc.xpt_topo_lock);
  990         xsoftc.xpt_generation++;
  991         mtx_unlock(&xsoftc.xpt_topo_lock);
  992 
  993         return (status);
  994 }
  995 
  996 void
  997 xpt_remove_periph(struct cam_periph *periph)
  998 {
  999         struct cam_ed *device;
 1000 
 1001         mtx_assert(periph->sim->mtx, MA_OWNED);
 1002 
 1003         device = periph->path->device;
 1004 
 1005         if (device != NULL) {
 1006                 struct periph_list *periph_head;
 1007 
 1008                 periph_head = &device->periphs;
 1009 
 1010                 /* Release the slot for this peripheral */
 1011                 camq_resize(&device->drvq, device->drvq.array_size - 1);
 1012 
 1013                 device->generation++;
 1014 
 1015                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
 1016         }
 1017 
 1018         mtx_lock(&xsoftc.xpt_topo_lock);
 1019         xsoftc.xpt_generation++;
 1020         mtx_unlock(&xsoftc.xpt_topo_lock);
 1021 }
 1022 
 1023 
 1024 void
 1025 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1026 {
 1027         struct  ccb_pathinq cpi;
 1028         struct  ccb_trans_settings cts;
 1029         struct  cam_path *path;
 1030         u_int   speed;
 1031         u_int   freq;
 1032         u_int   mb;
 1033 
 1034         mtx_assert(periph->sim->mtx, MA_OWNED);
 1035 
 1036         path = periph->path;
  1037         /*
  1038          * The SIM lock is held (asserted above), which keeps this
  1039          * announcement from interleaving with other CAM output.
  1040          */
 1041         printf("%s%d at %s%d bus %d target %d lun %d\n",
 1042                periph->periph_name, periph->unit_number,
 1043                path->bus->sim->sim_name,
 1044                path->bus->sim->unit_number,
 1045                path->bus->sim->bus_id,
 1046                path->target->target_id,
 1047                path->device->lun_id);
 1048         printf("%s%d: ", periph->periph_name, periph->unit_number);
  1049         if (path->device->protocol == PROTO_SCSI)
  1050                 scsi_print_inquiry(&path->device->inq_data);
  1051         else if (path->device->protocol == PROTO_ATA ||
  1052             path->device->protocol == PROTO_SATAPM)
  1053                 ata_print_ident(&path->device->ident_data);
  1054         else
  1055                 printf("Unknown protocol device\n");
 1056         if (bootverbose && path->device->serial_num_len > 0) {
 1057                 /* Don't wrap the screen  - print only the first 60 chars */
 1058                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1059                        periph->unit_number, path->device->serial_num);
 1060         }
 1061         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 1062         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 1063         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 1064         xpt_action((union ccb*)&cts);
 1065         if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 1066                 return;
 1067         }
 1068 
 1069         /* Ask the SIM for its base transfer speed */
 1070         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 1071         cpi.ccb_h.func_code = XPT_PATH_INQ;
 1072         xpt_action((union ccb *)&cpi);
 1073 
 1074         speed = cpi.base_transfer_speed;
 1075         freq = 0;
 1076         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1077                 struct  ccb_trans_settings_spi *spi;
 1078 
 1079                 spi = &cts.xport_specific.spi;
 1080                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
 1081                   && spi->sync_offset != 0) {
 1082                         freq = scsi_calc_syncsrate(spi->sync_period);
 1083                         speed = freq;
 1084                 }
 1085 
 1086                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
 1087                         speed *= (0x01 << spi->bus_width);
 1088         }
 1089         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1090                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
 1091                 if (fc->valid & CTS_FC_VALID_SPEED)
 1092                         speed = fc->bitrate;
 1093         }
 1094         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
 1095                 struct  ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
 1096                 if (sas->valid & CTS_SAS_VALID_SPEED)
 1097                         speed = sas->bitrate;
 1098         }
 1099         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SATA) {
 1100                 struct  ccb_trans_settings_sata *sata = &cts.xport_specific.sata;
 1101                 if (sata->valid & CTS_SATA_VALID_SPEED)
 1102                         speed = sata->bitrate;
 1103         }
 1104 
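               /*
                * base_transfer_speed and the transport bitrates above
                * are all in KB/s; split the value into MB and the KB
                * remainder for display.
                */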
 1105         mb = speed / 1000;
 1106         if (mb > 0)
 1107                 printf("%s%d: %d.%03dMB/s transfers",
 1108                        periph->periph_name, periph->unit_number,
 1109                        mb, speed % 1000);
 1110         else
 1111                 printf("%s%d: %dKB/s transfers", periph->periph_name,
 1112                        periph->unit_number, speed);
 1113         /* Report additional information about SPI connections */
 1114         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1115                 struct  ccb_trans_settings_spi *spi;
 1116 
 1117                 spi = &cts.xport_specific.spi;
 1118                 if (freq != 0) {
 1119                         printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
 1120                                freq % 1000,
 1121                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
 1122                              ? " DT" : "",
 1123                                spi->sync_offset);
 1124                 }
 1125                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
 1126                  && spi->bus_width > 0) {
 1127                         if (freq != 0) {
 1128                                 printf(", ");
 1129                         } else {
 1130                                 printf(" (");
 1131                         }
 1132                         printf("%dbit)", 8 * (0x01 << spi->bus_width));
 1133                 } else if (freq != 0) {
 1134                         printf(")");
 1135                 }
 1136         }
 1137         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1138                 struct  ccb_trans_settings_fc *fc;
 1139 
 1140                 fc = &cts.xport_specific.fc;
 1141                 if (fc->valid & CTS_FC_VALID_WWNN)
 1142                         printf(" WWNN 0x%llx", (long long) fc->wwnn);
 1143                 if (fc->valid & CTS_FC_VALID_WWPN)
 1144                         printf(" WWPN 0x%llx", (long long) fc->wwpn);
 1145                 if (fc->valid & CTS_FC_VALID_PORT)
 1146                         printf(" PortID 0x%x", fc->port);
 1147         }
 1148 
 1149         if (path->device->inq_flags & SID_CmdQue
 1150          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1151                 printf("\n%s%d: Command Queueing enabled",
 1152                        periph->periph_name, periph->unit_number);
 1153         }
 1154         printf("\n");
 1155 
 1156         /*
 1157          * We only want to print the caller's announce string if they've
 1158          * passed one in..
 1159          */
 1160         if (announce_string != NULL)
 1161                 printf("%s%d: %s\n", periph->periph_name,
 1162                        periph->unit_number, announce_string);
 1163 }
 1164 
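       /*
        * Compare a bus against the user-supplied match patterns and
        * return DM_RET_* flags telling the EDT walker whether to copy
        * this bus out and whether to descend to its targets.
        */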
 1165 static dev_match_ret
 1166 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1167             struct cam_eb *bus)
 1168 {
 1169         dev_match_ret retval;
 1170         int i;
 1171 
 1172         retval = DM_RET_NONE;
 1173 
 1174         /*
 1175          * If we aren't given something to match against, that's an error.
 1176          */
 1177         if (bus == NULL)
 1178                 return(DM_RET_ERROR);
 1179 
 1180         /*
 1181          * If there are no match entries, then this bus matches no
 1182          * matter what.
 1183          */
 1184         if ((patterns == NULL) || (num_patterns == 0))
 1185                 return(DM_RET_DESCEND | DM_RET_COPY);
 1186 
 1187         for (i = 0; i < num_patterns; i++) {
 1188                 struct bus_match_pattern *cur_pattern;
 1189 
 1190                 /*
 1191                  * If the pattern in question isn't for a bus node, we
 1192                  * aren't interested.  However, we do indicate to the
 1193                  * calling routine that we should continue descending the
 1194                  * tree, since the user wants to match against lower-level
 1195                  * EDT elements.
 1196                  */
 1197                 if (patterns[i].type != DEV_MATCH_BUS) {
 1198                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1199                                 retval |= DM_RET_DESCEND;
 1200                         continue;
 1201                 }
 1202 
 1203                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1204 
 1205                 /*
  1206                  * If they want to match any bus node, we give them
  1207                  * this bus.
 1208                  */
 1209                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1210                         /* set the copy flag */
 1211                         retval |= DM_RET_COPY;
 1212 
 1213                         /*
 1214                          * If we've already decided on an action, go ahead
 1215                          * and return.
 1216                          */
 1217                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1218                                 return(retval);
 1219                 }
 1220 
 1221                 /*
 1222                  * Not sure why someone would do this...
 1223                  */
 1224                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1225                         continue;
 1226 
 1227                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1228                  && (cur_pattern->path_id != bus->path_id))
 1229                         continue;
 1230 
 1231                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1232                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1233                         continue;
 1234 
 1235                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1236                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1237                         continue;
 1238 
 1239                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1240                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1241                              DEV_IDLEN) != 0))
 1242                         continue;
 1243 
 1244                 /*
 1245                  * If we get to this point, the user definitely wants
 1246                  * information on this bus.  So tell the caller to copy the
 1247                  * data out.
 1248                  */
 1249                 retval |= DM_RET_COPY;
 1250 
 1251                 /*
 1252                  * If the return action has been set to descend, then we
 1253                  * know that we've already seen a non-bus matching
 1254                  * expression, therefore we need to further descend the tree.
 1255                  * This won't change by continuing around the loop, so we
 1256                  * go ahead and return.  If we haven't seen a non-bus
 1257                  * matching expression, we keep going around the loop until
 1258                  * we exhaust the matching expressions.  We'll set the stop
 1259                  * flag once we fall out of the loop.
 1260                  */
 1261                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1262                         return(retval);
 1263         }
 1264 
 1265         /*
 1266          * If the return action hasn't been set to descend yet, that means
 1267          * we haven't seen anything other than bus matching patterns.  So
 1268          * tell the caller to stop descending the tree -- the user doesn't
 1269          * want to match against lower level tree elements.
 1270          */
 1271         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1272                 retval |= DM_RET_STOP;
 1273 
 1274         return(retval);
 1275 }
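
/*
 * Example (hedged sketch): a pattern array exercising the rules above.
 * The first entry matches the SIM named "ahc", unit 0; the second is a
 * peripheral pattern, for which xptbusmatch() reports DM_RET_DESCEND so
 * the traversal continues below the bus level.  Only structures and
 * flags already used in this file are assumed.
 */
#if 0
struct dev_match_pattern patterns[2];

bzero(patterns, sizeof(patterns));
patterns[0].type = DEV_MATCH_BUS;
patterns[0].pattern.bus_pattern.flags = BUS_MATCH_NAME | BUS_MATCH_UNIT;
strlcpy(patterns[0].pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
patterns[0].pattern.bus_pattern.unit_number = 0;
patterns[1].type = DEV_MATCH_PERIPH;
patterns[1].pattern.periph_pattern.flags = PERIPH_MATCH_ANY;
#endif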
 1276 
 1277 static dev_match_ret
 1278 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1279                struct cam_ed *device)
 1280 {
 1281         dev_match_ret retval;
 1282         int i;
 1283 
 1284         retval = DM_RET_NONE;
 1285 
 1286         /*
 1287          * If we aren't given something to match against, that's an error.
 1288          */
 1289         if (device == NULL)
 1290                 return(DM_RET_ERROR);
 1291 
 1292         /*
 1293          * If there are no match entries, then this device matches no
 1294          * matter what.
 1295          */
 1296         if ((patterns == NULL) || (num_patterns == 0))
 1297                 return(DM_RET_DESCEND | DM_RET_COPY);
 1298 
 1299         for (i = 0; i < num_patterns; i++) {
 1300                 struct device_match_pattern *cur_pattern;
 1301 
 1302                 /*
 1303                  * If the pattern in question isn't for a device node, we
 1304                  * aren't interested.
 1305                  */
 1306                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1307                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1308                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1309                                 retval |= DM_RET_DESCEND;
 1310                         continue;
 1311                 }
 1312 
 1313                 cur_pattern = &patterns[i].pattern.device_pattern;
 1314 
 1315                 /*
 1316                  * If they want to match any device node, we give them any
 1317                  * device node.
 1318                  */
 1319                 if (cur_pattern->flags == DEV_MATCH_ANY) {
 1320                         /* set the copy flag */
 1321                         retval |= DM_RET_COPY;
 1322 
 1323 
 1324                         /*
 1325                          * If we've already decided on an action, go ahead
 1326                          * and return.
 1327                          */
 1328                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1329                                 return(retval);
 1330                 }
 1331 
 1332                 /*
 1333                  * A pattern that matches nothing can never succeed; skip it.
 1334                  */
 1335                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1336                         continue;
 1337 
 1338                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1339                  && (cur_pattern->path_id != device->target->bus->path_id))
 1340                         continue;
 1341 
 1342                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1343                  && (cur_pattern->target_id != device->target->target_id))
 1344                         continue;
 1345 
 1346                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1347                  && (cur_pattern->target_lun != device->lun_id))
 1348                         continue;
 1349 
 1350                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1351                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1352                                     (caddr_t)&cur_pattern->inq_pat,
 1353                                     1, sizeof(cur_pattern->inq_pat),
 1354                                     scsi_static_inquiry_match) == NULL))
 1355                         continue;
 1356 
 1357                 /*
 1358                  * If we get to this point, the user definitely wants
 1359                  * information on this device.  So tell the caller to copy
 1360                  * the data out.
 1361                  */
 1362                 retval |= DM_RET_COPY;
 1363 
 1364                 /*
 1365                  * If the return action has been set to descend, then we
 1366                  * know that we've already seen a peripheral matching
 1367                  * expression, therefore we need to further descend the tree.
 1368                  * This won't change by continuing around the loop, so we
 1369                  * go ahead and return.  If we haven't seen a peripheral
 1370                  * matching expression, we keep going around the loop until
 1371                  * we exhaust the matching expressions.  We'll set the stop
 1372                  * flag once we fall out of the loop.
 1373                  */
 1374                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1375                         return(retval);
 1376         }
 1377 
 1378         /*
 1379          * If the return action hasn't been set to descend yet, that means
 1380          * we haven't seen any peripheral matching patterns.  So tell the
 1381          * caller to stop descending the tree -- the user doesn't want to
 1382          * match against lower level tree elements.
 1383          */
 1384         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1385                 retval |= DM_RET_STOP;
 1386 
 1387         return(retval);
 1388 }
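
/*
 * Example (hedged sketch): a device pattern using the inquiry path
 * above.  cam_quirkmatch() compares the device's inquiry data against a
 * quirk-style scsi_static_inquiry_pattern, where "*" acts as a
 * wildcard; the vendor string below is illustrative.
 */
#if 0
struct dev_match_pattern p;

bzero(&p, sizeof(p));
p.type = DEV_MATCH_DEVICE;
p.pattern.device_pattern.flags = DEV_MATCH_INQUIRY;
p.pattern.device_pattern.inq_pat.type = T_DIRECT;
p.pattern.device_pattern.inq_pat.media_type = SIP_MEDIA_FIXED;
strlcpy(p.pattern.device_pattern.inq_pat.vendor, "SEAGATE",
    sizeof(p.pattern.device_pattern.inq_pat.vendor));
strlcpy(p.pattern.device_pattern.inq_pat.product, "*",
    sizeof(p.pattern.device_pattern.inq_pat.product));
strlcpy(p.pattern.device_pattern.inq_pat.revision, "*",
    sizeof(p.pattern.device_pattern.inq_pat.revision));
#endif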
 1389 
 1390 /*
 1391  * Match a single peripheral against any number of match patterns.
 1392  */
 1393 static dev_match_ret
 1394 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1395                struct cam_periph *periph)
 1396 {
 1397         dev_match_ret retval;
 1398         int i;
 1399 
 1400         /*
 1401          * If we aren't given something to match against, that's an error.
 1402          */
 1403         if (periph == NULL)
 1404                 return(DM_RET_ERROR);
 1405 
 1406         /*
 1407          * If there are no match entries, then this peripheral matches no
 1408          * matter what.
 1409          */
 1410         if ((patterns == NULL) || (num_patterns == 0))
 1411                 return(DM_RET_STOP | DM_RET_COPY);
 1412 
 1413         /*
 1414          * There aren't any nodes below a peripheral node, so there's no
 1415          * reason to descend the tree any further.
 1416          */
 1417         retval = DM_RET_STOP;
 1418 
 1419         for (i = 0; i < num_patterns; i++) {
 1420                 struct periph_match_pattern *cur_pattern;
 1421 
 1422                 /*
 1423                  * If the pattern in question isn't for a peripheral, we
 1424                  * aren't interested.
 1425                  */
 1426                 if (patterns[i].type != DEV_MATCH_PERIPH)
 1427                         continue;
 1428 
 1429                 cur_pattern = &patterns[i].pattern.periph_pattern;
 1430 
 1431                 /*
 1432                  * If they want to match on anything, then we will do so.
 1433                  */
 1434                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 1435                         /* set the copy flag */
 1436                         retval |= DM_RET_COPY;
 1437 
 1438                         /*
 1439                          * We've already set the return action to stop,
 1440                          * since there are no nodes below peripherals in
 1441                          * the tree.
 1442                          */
 1443                         return(retval);
 1444                 }
 1445 
 1446                 /*
 1447                  * A pattern that matches nothing can never succeed; skip it.
 1448                  */
 1449                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 1450                         continue;
 1451 
 1452                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 1453                  && (cur_pattern->path_id != periph->path->bus->path_id))
 1454                         continue;
 1455 
 1456                 /*
 1457                  * For the target and lun IDs, we have to make sure the
 1458                  * target and lun pointers aren't NULL.  The xpt peripheral
 1459                  * has a wildcard target and device.
 1460                  */
 1461                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 1462                  && ((periph->path->target == NULL)
 1463                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 1464                         continue;
 1465 
 1466                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 1467                  && ((periph->path->device == NULL)
 1468                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 1469                         continue;
 1470 
 1471                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 1472                  && (cur_pattern->unit_number != periph->unit_number))
 1473                         continue;
 1474 
 1475                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 1476                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 1477                              DEV_IDLEN) != 0))
 1478                         continue;
 1479 
 1480                 /*
 1481                  * If we get to this point, the user definitely wants
 1482                  * information on this peripheral.  So tell the caller to
 1483                  * copy the data out.
 1484                  */
 1485                 retval |= DM_RET_COPY;
 1486 
 1487                 /*
 1488                  * The return action has already been set to stop, since
 1489                  * peripherals don't have any nodes below them in the EDT.
 1490                  */
 1491                 return(retval);
 1492         }
 1493 
 1494         /*
 1495          * If we get to this point, the peripheral that was passed in
 1496          * doesn't match any of the patterns.
 1497          */
 1498         return(retval);
 1499 }
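
/*
 * Example (hedged sketch): a peripheral pattern selecting "da0" by
 * driver name and unit number.  Peripherals are leaves of the EDT, so
 * xptperiphmatch() always reports DM_RET_STOP, with DM_RET_COPY or'ed
 * in on a hit.
 */
#if 0
struct dev_match_pattern p;

bzero(&p, sizeof(p));
p.type = DEV_MATCH_PERIPH;
p.pattern.periph_pattern.flags = PERIPH_MATCH_NAME | PERIPH_MATCH_UNIT;
strlcpy(p.pattern.periph_pattern.periph_name, "da", DEV_IDLEN);
p.pattern.periph_pattern.unit_number = 0;
#endif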
 1500 
 1501 static int
 1502 xptedtbusfunc(struct cam_eb *bus, void *arg)
 1503 {
 1504         struct ccb_dev_match *cdm;
 1505         dev_match_ret retval;
 1506 
 1507         cdm = (struct ccb_dev_match *)arg;
 1508 
 1509         /*
 1510          * If our position is for something deeper in the tree, that means
 1511          * that we've already seen this node.  So, we keep going down.
 1512          */
 1513         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1514          && (cdm->pos.cookie.bus == bus)
 1515          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1516          && (cdm->pos.cookie.target != NULL))
 1517                 retval = DM_RET_DESCEND;
 1518         else
 1519                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 1520 
 1521         /*
 1522          * If we got an error, bail out of the search.
 1523          */
 1524         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1525                 cdm->status = CAM_DEV_MATCH_ERROR;
 1526                 return(0);
 1527         }
 1528 
 1529         /*
 1530          * If the copy flag is set, copy this bus out.
 1531          */
 1532         if (retval & DM_RET_COPY) {
 1533                 int spaceleft, j;
 1534 
 1535                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1536                         sizeof(struct dev_match_result));
 1537 
 1538                 /*
 1539                  * If we don't have enough space to put in another
 1540                  * match result, save our position and tell the
 1541                  * user there are more devices to check.
 1542                  */
 1543                 if (spaceleft < sizeof(struct dev_match_result)) {
 1544                         bzero(&cdm->pos, sizeof(cdm->pos));
 1545                         cdm->pos.position_type =
 1546                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 1547 
 1548                         cdm->pos.cookie.bus = bus;
 1549                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1550                                 xsoftc.bus_generation;
 1551                         cdm->status = CAM_DEV_MATCH_MORE;
 1552                         return(0);
 1553                 }
 1554                 j = cdm->num_matches;
 1555                 cdm->num_matches++;
 1556                 cdm->matches[j].type = DEV_MATCH_BUS;
 1557                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 1558                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 1559                 cdm->matches[j].result.bus_result.unit_number =
 1560                         bus->sim->unit_number;
 1561                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 1562                         bus->sim->sim_name, DEV_IDLEN);
 1563         }
 1564 
 1565         /*
 1566          * If the user is only interested in busses, there's no
 1567          * reason to descend to the next level in the tree.
 1568          */
 1569         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1570                 return(1);
 1571 
 1572         /*
 1573          * If there is a target generation recorded, check it to
 1574          * make sure the target list hasn't changed.
 1575          */
 1576         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1577          && (bus == cdm->pos.cookie.bus)
 1578          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1579          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 1580          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 1581              bus->generation)) {
 1582                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1583                 return(0);
 1584         }
 1585 
 1586         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1587          && (cdm->pos.cookie.bus == bus)
 1588          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1589          && (cdm->pos.cookie.target != NULL))
 1590                 return(xpttargettraverse(bus,
 1591                                         (struct cam_et *)cdm->pos.cookie.target,
 1592                                          xptedttargetfunc, arg));
 1593         else
 1594                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 1595 }
 1596 
 1597 static int
 1598 xptedttargetfunc(struct cam_et *target, void *arg)
 1599 {
 1600         struct ccb_dev_match *cdm;
 1601 
 1602         cdm = (struct ccb_dev_match *)arg;
 1603 
 1604         /*
 1605          * If there is a device list generation recorded, check it to
 1606          * make sure the device list hasn't changed.
 1607          */
 1608         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1609          && (cdm->pos.cookie.bus == target->bus)
 1610          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1611          && (cdm->pos.cookie.target == target)
 1612          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1613          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 1614          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 1615              target->generation)) {
 1616                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1617                 return(0);
 1618         }
 1619 
 1620         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1621          && (cdm->pos.cookie.bus == target->bus)
 1622          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1623          && (cdm->pos.cookie.target == target)
 1624          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1625          && (cdm->pos.cookie.device != NULL))
 1626                 return(xptdevicetraverse(target,
 1627                                         (struct cam_ed *)cdm->pos.cookie.device,
 1628                                          xptedtdevicefunc, arg));
 1629         else
 1630                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 1631 }
 1632 
 1633 static int
 1634 xptedtdevicefunc(struct cam_ed *device, void *arg)
 1635 {
 1636 
 1637         struct ccb_dev_match *cdm;
 1638         dev_match_ret retval;
 1639 
 1640         cdm = (struct ccb_dev_match *)arg;
 1641 
 1642         /*
 1643          * If our position is for something deeper in the tree, that means
 1644          * that we've already seen this node.  So, we keep going down.
 1645          */
 1646         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1647          && (cdm->pos.cookie.device == device)
 1648          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1649          && (cdm->pos.cookie.periph != NULL))
 1650                 retval = DM_RET_DESCEND;
 1651         else
 1652                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 1653                                         device);
 1654 
 1655         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1656                 cdm->status = CAM_DEV_MATCH_ERROR;
 1657                 return(0);
 1658         }
 1659 
 1660         /*
 1661          * If the copy flag is set, copy this device out.
 1662          */
 1663         if (retval & DM_RET_COPY) {
 1664                 int spaceleft, j;
 1665 
 1666                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1667                         sizeof(struct dev_match_result));
 1668 
 1669                 /*
 1670                  * If we don't have enough space to put in another
 1671                  * match result, save our position and tell the
 1672                  * user there are more devices to check.
 1673                  */
 1674                 if (spaceleft < sizeof(struct dev_match_result)) {
 1675                         bzero(&cdm->pos, sizeof(cdm->pos));
 1676                         cdm->pos.position_type =
 1677                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1678                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 1679 
 1680                         cdm->pos.cookie.bus = device->target->bus;
 1681                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1682                                 xsoftc.bus_generation;
 1683                         cdm->pos.cookie.target = device->target;
 1684                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1685                                 device->target->bus->generation;
 1686                         cdm->pos.cookie.device = device;
 1687                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1688                                 device->target->generation;
 1689                         cdm->status = CAM_DEV_MATCH_MORE;
 1690                         return(0);
 1691                 }
 1692                 j = cdm->num_matches;
 1693                 cdm->num_matches++;
 1694                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 1695                 cdm->matches[j].result.device_result.path_id =
 1696                         device->target->bus->path_id;
 1697                 cdm->matches[j].result.device_result.target_id =
 1698                         device->target->target_id;
 1699                 cdm->matches[j].result.device_result.target_lun =
 1700                         device->lun_id;
 1701                 cdm->matches[j].result.device_result.protocol =
 1702                         device->protocol;
 1703                 bcopy(&device->inq_data,
 1704                       &cdm->matches[j].result.device_result.inq_data,
 1705                       sizeof(struct scsi_inquiry_data));
 1706                 bcopy(&device->ident_data,
 1707                       &cdm->matches[j].result.device_result.ident_data,
 1708                       sizeof(struct ata_params));
 1709 
 1710                 /* Let the user know whether this device is unconfigured */
 1711                 if (device->flags & CAM_DEV_UNCONFIGURED)
 1712                         cdm->matches[j].result.device_result.flags =
 1713                                 DEV_RESULT_UNCONFIGURED;
 1714                 else
 1715                         cdm->matches[j].result.device_result.flags =
 1716                                 DEV_RESULT_NOFLAG;
 1717         }
 1718 
 1719         /*
 1720          * If the user isn't interested in peripherals, don't descend
 1721          * the tree any further.
 1722          */
 1723         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1724                 return(1);
 1725 
 1726         /*
 1727          * If there is a peripheral list generation recorded, make sure
 1728          * it hasn't changed.
 1729          */
 1730         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1731          && (device->target->bus == cdm->pos.cookie.bus)
 1732          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1733          && (device->target == cdm->pos.cookie.target)
 1734          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1735          && (device == cdm->pos.cookie.device)
 1736          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1737          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 1738          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1739              device->generation)){
 1740                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1741                 return(0);
 1742         }
 1743 
 1744         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1745          && (cdm->pos.cookie.bus == device->target->bus)
 1746          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1747          && (cdm->pos.cookie.target == device->target)
 1748          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1749          && (cdm->pos.cookie.device == device)
 1750          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1751          && (cdm->pos.cookie.periph != NULL))
 1752                 return(xptperiphtraverse(device,
 1753                                 (struct cam_periph *)cdm->pos.cookie.periph,
 1754                                 xptedtperiphfunc, arg));
 1755         else
 1756                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 1757 }
 1758 
 1759 static int
 1760 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 1761 {
 1762         struct ccb_dev_match *cdm;
 1763         dev_match_ret retval;
 1764 
 1765         cdm = (struct ccb_dev_match *)arg;
 1766 
 1767         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1768 
 1769         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1770                 cdm->status = CAM_DEV_MATCH_ERROR;
 1771                 return(0);
 1772         }
 1773 
 1774         /*
 1775          * If the copy flag is set, copy this peripheral out.
 1776          */
 1777         if (retval & DM_RET_COPY) {
 1778                 int spaceleft, j;
 1779 
 1780                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1781                         sizeof(struct dev_match_result));
 1782 
 1783                 /*
 1784                  * If we don't have enough space to put in another
 1785                  * match result, save our position and tell the
 1786                  * user there are more devices to check.
 1787                  */
 1788                 if (spaceleft < sizeof(struct dev_match_result)) {
 1789                         bzero(&cdm->pos, sizeof(cdm->pos));
 1790                         cdm->pos.position_type =
 1791                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1792                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 1793                                 CAM_DEV_POS_PERIPH;
 1794 
 1795                         cdm->pos.cookie.bus = periph->path->bus;
 1796                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1797                                 xsoftc.bus_generation;
 1798                         cdm->pos.cookie.target = periph->path->target;
 1799                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1800                                 periph->path->bus->generation;
 1801                         cdm->pos.cookie.device = periph->path->device;
 1802                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1803                                 periph->path->target->generation;
 1804                         cdm->pos.cookie.periph = periph;
 1805                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1806                                 periph->path->device->generation;
 1807                         cdm->status = CAM_DEV_MATCH_MORE;
 1808                         return(0);
 1809                 }
 1810 
 1811                 j = cdm->num_matches;
 1812                 cdm->num_matches++;
 1813                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1814                 cdm->matches[j].result.periph_result.path_id =
 1815                         periph->path->bus->path_id;
 1816                 cdm->matches[j].result.periph_result.target_id =
 1817                         periph->path->target->target_id;
 1818                 cdm->matches[j].result.periph_result.target_lun =
 1819                         periph->path->device->lun_id;
 1820                 cdm->matches[j].result.periph_result.unit_number =
 1821                         periph->unit_number;
 1822                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1823                         periph->periph_name, DEV_IDLEN);
 1824         }
 1825 
 1826         return(1);
 1827 }
 1828 
 1829 static int
 1830 xptedtmatch(struct ccb_dev_match *cdm)
 1831 {
 1832         int ret;
 1833 
 1834         cdm->num_matches = 0;
 1835 
 1836         /*
 1837          * Check the bus list generation.  If it has changed, the user
 1838          * needs to reset everything and start over.
 1839          */
 1840         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1841          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 1842          && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
 1843                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1844                 return(0);
 1845         }
 1846 
 1847         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1848          && (cdm->pos.cookie.bus != NULL))
 1849                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 1850                                      xptedtbusfunc, cdm);
 1851         else
 1852                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 1853 
 1854         /*
 1855          * If we get back 0, that means that we had to stop before fully
 1856          * traversing the EDT.  It also means that one of the subroutines
 1857          * has set the status field to the proper value.  If we get back 1,
 1858          * we've fully traversed the EDT and copied out any matching entries.
 1859          */
 1860         if (ret == 1)
 1861                 cdm->status = CAM_DEV_MATCH_LAST;
 1862 
 1863         return(ret);
 1864 }
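
/*
 * Example (hedged userland sketch): the consumer side of the MORE/LAST
 * protocol implemented above, modeled on camcontrol(8)'s device-tree
 * walk.  ccb.cdm.pos is left untouched between calls so each
 * XPT_DEV_MATCH resumes where the previous one stopped; with
 * num_patterns == 0, every node matches.  Error handling and match
 * decoding are omitted.
 */
#if 0
union ccb ccb;
int fd;

if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
        err(1, "open /dev/xpt0");

bzero(&ccb, sizeof(ccb));
ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
ccb.ccb_h.func_code = XPT_DEV_MATCH;
ccb.cdm.match_buf_len = 100 * sizeof(struct dev_match_result);
ccb.cdm.matches = malloc(ccb.cdm.match_buf_len);
ccb.cdm.num_patterns = 0;       /* no patterns: match everything */

do {
        if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
                err(1, "CAMIOCOMMAND");
        /* ... consume ccb.cdm.num_matches results here ... */
} while (ccb.ccb_h.status == CAM_REQ_CMP
      && ccb.cdm.status == CAM_DEV_MATCH_MORE);
#endif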
 1865 
 1866 static int
 1867 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 1868 {
 1869         struct ccb_dev_match *cdm;
 1870 
 1871         cdm = (struct ccb_dev_match *)arg;
 1872 
 1873         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1874          && (cdm->pos.cookie.pdrv == pdrv)
 1875          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1876          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 1877          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1878              (*pdrv)->generation)) {
 1879                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1880                 return(0);
 1881         }
 1882 
 1883         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1884          && (cdm->pos.cookie.pdrv == pdrv)
 1885          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1886          && (cdm->pos.cookie.periph != NULL))
 1887                 return(xptpdperiphtraverse(pdrv,
 1888                                 (struct cam_periph *)cdm->pos.cookie.periph,
 1889                                 xptplistperiphfunc, arg));
 1890         else
 1891                 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
 1892 }
 1893 
 1894 static int
 1895 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 1896 {
 1897         struct ccb_dev_match *cdm;
 1898         dev_match_ret retval;
 1899 
 1900         cdm = (struct ccb_dev_match *)arg;
 1901 
 1902         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1903 
 1904         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1905                 cdm->status = CAM_DEV_MATCH_ERROR;
 1906                 return(0);
 1907         }
 1908 
 1909         /*
 1910          * If the copy flag is set, copy this peripheral out.
 1911          */
 1912         if (retval & DM_RET_COPY) {
 1913                 int spaceleft, j;
 1914 
 1915                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1916                         sizeof(struct dev_match_result));
 1917 
 1918                 /*
 1919                  * If we don't have enough space to put in another
 1920                  * match result, save our position and tell the
 1921                  * user there are more devices to check.
 1922                  */
 1923                 if (spaceleft < sizeof(struct dev_match_result)) {
 1924                         struct periph_driver **pdrv;
 1925 
 1926                         pdrv = NULL;
 1927                         bzero(&cdm->pos, sizeof(cdm->pos));
 1928                         cdm->pos.position_type =
 1929                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 1930                                 CAM_DEV_POS_PERIPH;
 1931 
 1932                         /*
 1933                          * This linear search is deliberate.  There
 1934                          * are very few peripheral drivers, so walking
 1935                          * this list costs less in the long run than
 1936                          * bloating every peripheral structure with a
 1937                          * pointer back to its parent peripheral
 1938                          * driver linker set entry.
 1939                          */
 1940                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 1941                                 if (strcmp((*pdrv)->driver_name,
 1942                                     periph->periph_name) == 0)
 1943                                         break;
 1944                         }
 1945 
 1946                         if (*pdrv == NULL) {
 1947                                 cdm->status = CAM_DEV_MATCH_ERROR;
 1948                                 return(0);
 1949                         }
 1950 
 1951                         cdm->pos.cookie.pdrv = pdrv;
 1952                         /*
 1953                          * The periph generation slot does double duty, as
 1954                          * does the periph pointer slot.  They are used for
 1955                          * both edt and pdrv lookups and positioning.
 1956                          */
 1957                         cdm->pos.cookie.periph = periph;
 1958                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1959                                 (*pdrv)->generation;
 1960                         cdm->status = CAM_DEV_MATCH_MORE;
 1961                         return(0);
 1962                 }
 1963 
 1964                 j = cdm->num_matches;
 1965                 cdm->num_matches++;
 1966                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1967                 cdm->matches[j].result.periph_result.path_id =
 1968                         periph->path->bus->path_id;
 1969 
 1970                 /*
 1971                  * The transport layer peripheral doesn't have a target or
 1972                  * lun.
 1973                  */
 1974                 if (periph->path->target)
 1975                         cdm->matches[j].result.periph_result.target_id =
 1976                                 periph->path->target->target_id;
 1977                 else
 1978                         cdm->matches[j].result.periph_result.target_id = -1;
 1979 
 1980                 if (periph->path->device)
 1981                         cdm->matches[j].result.periph_result.target_lun =
 1982                                 periph->path->device->lun_id;
 1983                 else
 1984                         cdm->matches[j].result.periph_result.target_lun = -1;
 1985 
 1986                 cdm->matches[j].result.periph_result.unit_number =
 1987                         periph->unit_number;
 1988                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1989                         periph->periph_name, DEV_IDLEN);
 1990         }
 1991 
 1992         return(1);
 1993 }
 1994 
 1995 static int
 1996 xptperiphlistmatch(struct ccb_dev_match *cdm)
 1997 {
 1998         int ret;
 1999 
 2000         cdm->num_matches = 0;
 2001 
 2002         /*
 2003          * At the corresponding point in the EDT traversal function,
 2004          * xptedtmatch(), we check the bus list generation to make sure
 2005          * that no busses have been added or removed since the user last
 2006          * sent an XPT_DEV_MATCH ccb through.  Here, however, we don't
 2007          * have to worry about peripheral driver types coming or going;
 2008          * they're in a linker set, and therefore can't change without a
 2009          * recompile.
 2010          */
 2011 
 2012         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2013          && (cdm->pos.cookie.pdrv != NULL))
 2014                 ret = xptpdrvtraverse(
 2015                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2016                                 xptplistpdrvfunc, cdm);
 2017         else
 2018                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2019 
 2020         /*
 2021          * If we get back 0, that means that we had to stop before fully
 2022          * traversing the peripheral driver tree.  It also means that one of
 2023          * the subroutines has set the status field to the proper value.  If
 2024          * we get back 1, we've fully traversed the peripheral driver
 2025          * tree and copied out any matching entries.
 2026          */
 2027         if (ret == 1)
 2028                 cdm->status = CAM_DEV_MATCH_LAST;
 2029 
 2030         return(ret);
 2031 }
 2032 
 2033 static int
 2034 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2035 {
 2036         struct cam_eb *bus, *next_bus;
 2037         int retval;
 2038 
 2039         retval = 1;
 2040 
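        /*
         * The topology lock protects the bus list itself.  It is
         * dropped around each tr_func() call so the callback can run
         * under the bus's SIM lock; next_bus is sampled before
         * unlocking so the walk can continue once the lock is retaken.
         */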
 2041         mtx_lock(&xsoftc.xpt_topo_lock);
 2042         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
 2043              bus != NULL;
 2044              bus = next_bus) {
 2045                 next_bus = TAILQ_NEXT(bus, links);
 2046 
 2047                 mtx_unlock(&xsoftc.xpt_topo_lock);
 2048                 CAM_SIM_LOCK(bus->sim);
 2049                 retval = tr_func(bus, arg);
 2050                 CAM_SIM_UNLOCK(bus->sim);
 2051                 if (retval == 0)
 2052                         return(retval);
 2053                 mtx_lock(&xsoftc.xpt_topo_lock);
 2054         }
 2055         mtx_unlock(&xsoftc.xpt_topo_lock);
 2056 
 2057         return(retval);
 2058 }
 2059 
 2060 int
 2061 xpt_sim_opened(struct cam_sim *sim)
 2062 {
 2063         struct cam_eb *bus;
 2064         struct cam_et *target;
 2065         struct cam_ed *device;
 2066         struct cam_periph *periph;
 2067 
 2068         KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
 2069         mtx_assert(sim->mtx, MA_OWNED);
 2070 
 2071         mtx_lock(&xsoftc.xpt_topo_lock);
 2072         TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
 2073                 if (bus->sim != sim)
 2074                         continue;
 2075 
 2076                 TAILQ_FOREACH(target, &bus->et_entries, links) {
 2077                         TAILQ_FOREACH(device, &target->ed_entries, links) {
 2078                                 SLIST_FOREACH(periph, &device->periphs,
 2079                                     periph_links) {
 2080                                         if (periph->refcount > 0) {
 2081                                                 mtx_unlock(&xsoftc.xpt_topo_lock);
 2082                                                 return (1);
 2083                                         }
 2084                                 }
 2085                         }
 2086                 }
 2087         }
 2088 
 2089         mtx_unlock(&xsoftc.xpt_topo_lock);
 2090         return (0);
 2091 }
 2092 
 2093 static int
 2094 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2095                   xpt_targetfunc_t *tr_func, void *arg)
 2096 {
 2097         struct cam_et *target, *next_target;
 2098         int retval;
 2099 
 2100         retval = 1;
 2101         for (target = (start_target ? start_target :
 2102                        TAILQ_FIRST(&bus->et_entries));
 2103              target != NULL; target = next_target) {
 2104 
 2105                 next_target = TAILQ_NEXT(target, links);
 2106 
 2107                 retval = tr_func(target, arg);
 2108 
 2109                 if (retval == 0)
 2110                         return(retval);
 2111         }
 2112 
 2113         return(retval);
 2114 }
 2115 
 2116 static int
 2117 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2118                   xpt_devicefunc_t *tr_func, void *arg)
 2119 {
 2120         struct cam_ed *device, *next_device;
 2121         int retval;
 2122 
 2123         retval = 1;
 2124         for (device = (start_device ? start_device :
 2125                        TAILQ_FIRST(&target->ed_entries));
 2126              device != NULL;
 2127              device = next_device) {
 2128 
 2129                 next_device = TAILQ_NEXT(device, links);
 2130 
 2131                 retval = tr_func(device, arg);
 2132 
 2133                 if (retval == 0)
 2134                         return(retval);
 2135         }
 2136 
 2137         return(retval);
 2138 }
 2139 
 2140 static int
 2141 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2142                   xpt_periphfunc_t *tr_func, void *arg)
 2143 {
 2144         struct cam_periph *periph, *next_periph;
 2145         int retval;
 2146 
 2147         retval = 1;
 2148 
 2149         for (periph = (start_periph ? start_periph :
 2150                        SLIST_FIRST(&device->periphs));
 2151              periph != NULL;
 2152              periph = next_periph) {
 2153 
 2154                 next_periph = SLIST_NEXT(periph, periph_links);
 2155 
 2156                 retval = tr_func(periph, arg);
 2157                 if (retval == 0)
 2158                         return(retval);
 2159         }
 2160 
 2161         return(retval);
 2162 }
 2163 
 2164 static int
 2165 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2166                 xpt_pdrvfunc_t *tr_func, void *arg)
 2167 {
 2168         struct periph_driver **pdrv;
 2169         int retval;
 2170 
 2171         retval = 1;
 2172 
 2173         /*
 2174          * We don't traverse the peripheral driver list like we do the
 2175          * other lists, because it is a linker set, and therefore cannot be
 2176          * changed during runtime.  If the peripheral driver list is ever
 2177          * re-done to be something other than a linker set (i.e. it can
 2178          * change while the system is running), the list traversal should
 2179          * be modified to work like the other traversal functions.
 2180          */
 2181         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2182              *pdrv != NULL; pdrv++) {
 2183                 retval = tr_func(pdrv, arg);
 2184 
 2185                 if (retval == 0)
 2186                         return(retval);
 2187         }
 2188 
 2189         return(retval);
 2190 }
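
/*
 * Example (hedged sketch): how an entry lands in the periph_drivers
 * linker set walked above.  PERIPHDRIVER_DECLARE() (cam_periph.h)
 * places the struct periph_driver into the set at link time; "xx" and
 * xxinit are illustrative names, and the layout follows in-tree
 * peripheral drivers such as scsi_da.c.
 */
#if 0
static periph_init_t xxinit;

static struct periph_driver xxdriver = {
        xxinit, "xx",
        TAILQ_HEAD_INITIALIZER(xxdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(xx, xxdriver);
#endif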
 2191 
 2192 static int
 2193 xptpdperiphtraverse(struct periph_driver **pdrv,
 2194                     struct cam_periph *start_periph,
 2195                     xpt_periphfunc_t *tr_func, void *arg)
 2196 {
 2197         struct cam_periph *periph, *next_periph;
 2198         int retval;
 2199 
 2200         retval = 1;
 2201 
 2202         for (periph = (start_periph ? start_periph :
 2203              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2204              periph = next_periph) {
 2205 
 2206                 next_periph = TAILQ_NEXT(periph, unit_links);
 2207 
 2208                 retval = tr_func(periph, arg);
 2209                 if (retval == 0)
 2210                         return(retval);
 2211         }
 2212         return(retval);
 2213 }
 2214 
 2215 static int
 2216 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2217 {
 2218         struct xpt_traverse_config *tr_config;
 2219 
 2220         tr_config = (struct xpt_traverse_config *)arg;
 2221 
 2222         if (tr_config->depth == XPT_DEPTH_BUS) {
 2223                 xpt_busfunc_t *tr_func;
 2224 
 2225                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2226 
 2227                 return(tr_func(bus, tr_config->tr_arg));
 2228         } else
 2229                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2230 }
 2231 
 2232 static int
 2233 xptdeftargetfunc(struct cam_et *target, void *arg)
 2234 {
 2235         struct xpt_traverse_config *tr_config;
 2236 
 2237         tr_config = (struct xpt_traverse_config *)arg;
 2238 
 2239         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2240                 xpt_targetfunc_t *tr_func;
 2241 
 2242                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2243 
 2244                 return(tr_func(target, tr_config->tr_arg));
 2245         } else
 2246                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2247 }
 2248 
 2249 static int
 2250 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2251 {
 2252         struct xpt_traverse_config *tr_config;
 2253 
 2254         tr_config = (struct xpt_traverse_config *)arg;
 2255 
 2256         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2257                 xpt_devicefunc_t *tr_func;
 2258 
 2259                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2260 
 2261                 return(tr_func(device, tr_config->tr_arg));
 2262         } else
 2263                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2264 }
 2265 
 2266 static int
 2267 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2268 {
 2269         struct xpt_traverse_config *tr_config;
 2270         xpt_periphfunc_t *tr_func;
 2271 
 2272         tr_config = (struct xpt_traverse_config *)arg;
 2273 
 2274         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2275 
 2276         /*
 2277          * Unlike the other default functions, we don't check for depth
 2278          * here.  The peripheral driver level is the last level in the EDT,
 2279          * so if we're here, we should execute the function in question.
 2280          */
 2281         return(tr_func(periph, tr_config->tr_arg));
 2282 }
 2283 
 2284 /*
 2285  * Execute the given function for every bus in the EDT.
 2286  */
 2287 static int
 2288 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2289 {
 2290         struct xpt_traverse_config tr_config;
 2291 
 2292         tr_config.depth = XPT_DEPTH_BUS;
 2293         tr_config.tr_func = tr_func;
 2294         tr_config.tr_arg = arg;
 2295 
 2296         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2297 }
 2298 
 2299 /*
 2300  * Execute the given function for every device in the EDT.
 2301  */
 2302 static int
 2303 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2304 {
 2305         struct xpt_traverse_config tr_config;
 2306 
 2307         tr_config.depth = XPT_DEPTH_DEVICE;
 2308         tr_config.tr_func = tr_func;
 2309         tr_config.tr_arg = arg;
 2310 
 2311         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2312 }
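
/*
 * Example (hedged sketch): a callback for xpt_for_all_devices().  Per
 * the traversal contract above, returning 1 continues the walk and
 * returning 0 aborts it; xptcountdevsfunc is an invented name.
 */
#if 0
static int
xptcountdevsfunc(struct cam_ed *device, void *arg)
{
        if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
                (*(int *)arg)++;
        return (1);     /* keep traversing */
}

/* e.g., from within an xpt routine: */
int ndevs = 0;
xpt_for_all_devices(xptcountdevsfunc, &ndevs);
#endif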
 2313 
 2314 static int
 2315 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2316 {
 2317         struct cam_path path;
 2318         struct ccb_getdev cgd;
 2319         struct async_node *cur_entry;
 2320 
 2321         cur_entry = (struct async_node *)arg;
 2322 
 2323         /*
 2324          * Don't report unconfigured devices (Wildcard devs,
 2325          * devices only for target mode, device instances
 2326          * that have been invalidated but are waiting for
 2327          * their last reference count to be released).
 2328          */
 2329         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2330                 return (1);
 2331 
 2332         xpt_compile_path(&path,
 2333                          NULL,
 2334                          device->target->bus->path_id,
 2335                          device->target->target_id,
 2336                          device->lun_id);
 2337         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
 2338         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2339         xpt_action((union ccb *)&cgd);
 2340         cur_entry->callback(cur_entry->callback_arg,
 2341                             AC_FOUND_DEVICE,
 2342                             &path, &cgd);
 2343         xpt_release_path(&path);
 2344 
 2345         return(1);
 2346 }
 2347 
 2348 static int
 2349 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2350 {
 2351         struct cam_path path;
 2352         struct ccb_pathinq cpi;
 2353         struct async_node *cur_entry;
 2354 
 2355         cur_entry = (struct async_node *)arg;
 2356 
 2357         xpt_compile_path(&path, /*periph*/NULL,
 2358                          bus->sim->path_id,
 2359                          CAM_TARGET_WILDCARD,
 2360                          CAM_LUN_WILDCARD);
 2361         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 2362         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2363         xpt_action((union ccb *)&cpi);
 2364         cur_entry->callback(cur_entry->callback_arg,
 2365                             AC_PATH_REGISTERED,
 2366                             &path, &cpi);
 2367         xpt_release_path(&path);
 2368 
 2369         return(1);
 2370 }
 2371 
 2372 static void
 2373 xpt_action_sasync_cb(void *context, int pending)
 2374 {
 2375         struct async_node *cur_entry;
 2376         struct xpt_task *task;
 2377         uint32_t added;
 2378 
 2379         task = (struct xpt_task *)context;
 2380         cur_entry = (struct async_node *)task->data1;
 2381         added = task->data2;
 2382 
 2383         if ((added & AC_FOUND_DEVICE) != 0) {
 2384                 /*
 2385                  * Get this peripheral up to date with all
 2386                  * the currently existing devices.
 2387                  */
 2388                 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
 2389         }
 2390         if ((added & AC_PATH_REGISTERED) != 0) {
 2391                 /*
 2392                  * Get this peripheral up to date with all
 2393                  * the currently existing busses.
 2394                  */
 2395                 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
 2396         }
 2397 
 2398         free(task, M_CAMXPT);
 2399 }
 2400 
 2401 void
 2402 xpt_action(union ccb *start_ccb)
 2403 {
 2404 
 2405         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2406 
 2407         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2408         (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
 2409 }
 2410 
 2411 void
 2412 xpt_action_default(union ccb *start_ccb)
 2413 {
 2414 
 2415         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
 2416 
 2417 
 2418         switch (start_ccb->ccb_h.func_code) {
 2419         case XPT_SCSI_IO:
 2420         {
 2421                 struct cam_ed *device;
 2422 #ifdef CAMDEBUG
 2423                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 2424                 struct cam_path *path;
 2425 
 2426                 path = start_ccb->ccb_h.path;
 2427 #endif
 2428 
 2429                 /*
 2430                  * For the sake of compatibility with SCSI-1
 2431                  * devices that may not understand the identify
 2432                  * message, we include lun information in the
 2433                  * second byte of all commands.  SCSI-1 specifies
 2434                  * that luns are a 3 bit value and reserves only 3
 2435                  * bits for lun information in the CDB.  Later
 2436                  * revisions of the SCSI spec allow for more than 8
 2437                  * luns, but have deprecated lun information in the
 2438                  * CDB.  So, if the lun won't fit, we must omit it.
 2439                  *
 2440                  * Also be aware that during initial probing for devices,
 2441                  * the inquiry information is unknown but initialized to 0.
 2442                  * This means that this code will be exercised even while
 2443                  * probing devices whose ANSI revision is greater than 2.
 2444                  */
 2445                 device = start_ccb->ccb_h.path->device;
 2446                 if (device->protocol_version <= SCSI_REV_2
 2447                  && start_ccb->ccb_h.target_lun < 8
 2448                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 2449 
 2450                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 2451                             start_ccb->ccb_h.target_lun << 5;
 2452                 }
 2453                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 2454                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
 2455                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
 2456                                        &path->device->inq_data),
 2457                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
 2458                                           cdb_str, sizeof(cdb_str))));
 2459         }
 2460         /* FALLTHROUGH */
 2461         case XPT_TARGET_IO:
 2462         case XPT_CONT_TARGET_IO:
 2463                 start_ccb->csio.sense_resid = 0;
 2464                 start_ccb->csio.resid = 0;
 2465                 /* FALLTHROUGH */
 2466         case XPT_ATA_IO:
 2467                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) {
 2468                         start_ccb->ataio.resid = 0;
 2469                 }
 2470         case XPT_RESET_DEV:
 2471         case XPT_ENG_EXEC:
 2472         {
 2473                 struct cam_path *path;
 2474                 int runq;
 2475 
 2476                 path = start_ccb->ccb_h.path;
 2477 
 2478                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 2479                 if (path->device->qfrozen_cnt == 0)
 2480                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
 2481                 else
 2482                         runq = 0;
 2483                 if (runq != 0)
 2484                         xpt_run_dev_sendq(path->bus);
 2485                 break;
 2486         }
 2487         case XPT_CALC_GEOMETRY:
 2488         {
 2489                 struct cam_sim *sim;
 2490 
 2491                 /* Filter out garbage */
 2492                 if (start_ccb->ccg.block_size == 0
 2493                  || start_ccb->ccg.volume_size == 0) {
 2494                         start_ccb->ccg.cylinders = 0;
 2495                         start_ccb->ccg.heads = 0;
 2496                         start_ccb->ccg.secs_per_track = 0;
 2497                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2498                         break;
 2499                 }
 2500 #ifdef PC98
 2501                 /*
 2502                  * In a PC-98 system, geometry translation depends on
 2503                  * the "real" device geometry obtained from mode page 4.
 2504                  * SCSI geometry translation is performed in the
 2505                  * initialization routine of the SCSI BIOS, and the result is
 2506                  * stored in host memory.  If the translation is available
 2507                  * in host memory, use it.  If not, rely on the default
 2508                  * translation the device driver performs.
 2509                  */
 2510                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 2511                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2512                         break;
 2513                 }
 2514 #endif
 2515                 sim = start_ccb->ccb_h.path->bus->sim;
 2516                 (*(sim->sim_action))(sim, start_ccb);
 2517                 break;
 2518         }
 2519         case XPT_ABORT:
 2520         {
 2521                 union ccb* abort_ccb;
 2522 
 2523                 abort_ccb = start_ccb->cab.abort_ccb;
 2524                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 2525 
 2526                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 2527                                 struct cam_ccbq *ccbq;
 2528 
 2529                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
 2530                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
 2531                                 abort_ccb->ccb_h.status =
 2532                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2533                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2534                                 xpt_done(abort_ccb);
 2535                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2536                                 break;
 2537                         }
 2538                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 2539                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 2540                                 /*
 2541                                  * We've caught this ccb en route to
 2542                                  * the SIM.  Flag it for abort and the
 2543                                  * SIM will do so just before starting
 2544                                  * real work on the CCB.
 2545                                  */
 2546                                 abort_ccb->ccb_h.status =
 2547                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2548                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2549                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2550                                 break;
 2551                         }
 2552                 }
 2553                 if (XPT_FC_IS_QUEUED(abort_ccb)
 2554                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 2555                         /*
 2556                          * It's already completed but waiting
 2557                          * for our SWI to get to it.
 2558                          */
 2559                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 2560                         break;
 2561                 }
 2562                 /*
 2563                  * If we weren't able to take care of the abort request
 2564                  * in the XPT, pass the request down to the SIM for processing.
 2565                  */
 2566         }
 2567         /* FALLTHROUGH */
 2568         case XPT_ACCEPT_TARGET_IO:
 2569         case XPT_EN_LUN:
 2570         case XPT_IMMED_NOTIFY:
 2571         case XPT_NOTIFY_ACK:
 2572         case XPT_RESET_BUS:
 2573         case XPT_IMMEDIATE_NOTIFY:
 2574         case XPT_NOTIFY_ACKNOWLEDGE:
 2575         case XPT_GET_SIM_KNOB:
 2576         case XPT_SET_SIM_KNOB:
 2577         {
 2578                 struct cam_sim *sim;
 2579 
 2580                 sim = start_ccb->ccb_h.path->bus->sim;
 2581                 (*(sim->sim_action))(sim, start_ccb);
 2582                 break;
 2583         }
 2584         case XPT_PATH_INQ:
 2585         {
 2586                 struct cam_sim *sim;
 2587 
 2588                 sim = start_ccb->ccb_h.path->bus->sim;
 2589                 (*(sim->sim_action))(sim, start_ccb);
 2590                 break;
 2591         }
 2592         case XPT_PATH_STATS:
 2593                 start_ccb->cpis.last_reset =
 2594                         start_ccb->ccb_h.path->bus->last_reset;
 2595                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2596                 break;
 2597         case XPT_GDEV_TYPE:
 2598         {
 2599                 struct cam_ed *dev;
 2600 
 2601                 dev = start_ccb->ccb_h.path->device;
 2602                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2603                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2604                 } else {
 2605                         struct ccb_getdev *cgd;
 2606                         struct cam_eb *bus;
 2607                         struct cam_et *tar;
 2608 
 2609                         cgd = &start_ccb->cgd;
 2610                         bus = cgd->ccb_h.path->bus;
 2611                         tar = cgd->ccb_h.path->target;
 2612                         cgd->protocol = dev->protocol;
 2613                         cgd->inq_data = dev->inq_data;
 2614                         cgd->ident_data = dev->ident_data;
 2615                         cgd->ccb_h.status = CAM_REQ_CMP;
 2616                         cgd->serial_num_len = dev->serial_num_len;
 2617                         if ((dev->serial_num_len > 0)
 2618                          && (dev->serial_num != NULL))
 2619                                 bcopy(dev->serial_num, cgd->serial_num,
 2620                                       dev->serial_num_len);
 2621                 }
 2622                 break;
 2623         }
 2624         case XPT_GDEV_STATS:
 2625         {
 2626                 struct cam_ed *dev;
 2627 
 2628                 dev = start_ccb->ccb_h.path->device;
 2629                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2630                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2631                 } else {
 2632                         struct ccb_getdevstats *cgds;
 2633                         struct cam_eb *bus;
 2634                         struct cam_et *tar;
 2635 
 2636                         cgds = &start_ccb->cgds;
 2637                         bus = cgds->ccb_h.path->bus;
 2638                         tar = cgds->ccb_h.path->target;
 2639                         cgds->dev_openings = dev->ccbq.dev_openings;
 2640                         cgds->dev_active = dev->ccbq.dev_active;
 2641                         cgds->devq_openings = dev->ccbq.devq_openings;
 2642                         cgds->devq_queued = dev->ccbq.queue.entries;
 2643                         cgds->held = dev->ccbq.held;
 2644                         cgds->last_reset = tar->last_reset;
 2645                         cgds->maxtags = dev->maxtags;
 2646                         cgds->mintags = dev->mintags;
 2647                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 2648                                 cgds->last_reset = bus->last_reset;
 2649                         cgds->ccb_h.status = CAM_REQ_CMP;
 2650                 }
 2651                 break;
 2652         }
 2653         case XPT_GDEVLIST:
 2654         {
 2655                 struct cam_periph       *nperiph;
 2656                 struct periph_list      *periph_head;
 2657                 struct ccb_getdevlist   *cgdl;
 2658                 u_int                   i;
 2659                 struct cam_ed           *device;
 2660                 int                     found;
 2661 
 2662 
 2663                 found = 0;
 2664 
 2665                 /*
 2666                  * Don't want anyone mucking with our data.
 2667                  */
 2668                 device = start_ccb->ccb_h.path->device;
 2669                 periph_head = &device->periphs;
 2670                 cgdl = &start_ccb->cgdl;
 2671 
 2672                 /*
 2673                  * Check and see if the list has changed since the user
 2674                  * last requested a list member.  If so, tell them that the
 2675                  * list has changed, and therefore they need to start over
 2676                  * from the beginning.
 2677                  */
 2678                 if ((cgdl->index != 0) &&
 2679                     (cgdl->generation != device->generation)) {
 2680                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 2681                         break;
 2682                 }
 2683 
 2684                 /*
 2685                  * Traverse the list of peripherals and attempt to find
 2686                  * the requested peripheral.
 2687                  */
 2688                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 2689                      (nperiph != NULL) && (i <= cgdl->index);
 2690                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 2691                         if (i == cgdl->index) {
 2692                                 strncpy(cgdl->periph_name,
 2693                                         nperiph->periph_name,
 2694                                         DEV_IDLEN);
 2695                                 cgdl->unit_number = nperiph->unit_number;
 2696                                 found = 1;
 2697                         }
 2698                 }
 2699                 if (found == 0) {
 2700                         cgdl->status = CAM_GDEVLIST_ERROR;
 2701                         break;
 2702                 }
 2703 
 2704                 if (nperiph == NULL)
 2705                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 2706                 else
 2707                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 2708 
 2709                 cgdl->index++;
 2710                 cgdl->generation = device->generation;
 2711 
 2712                 cgdl->ccb_h.status = CAM_REQ_CMP;
 2713                 break;
 2714         }
 2715         case XPT_DEV_MATCH:
 2716         {
 2717                 dev_pos_type position_type;
 2718                 struct ccb_dev_match *cdm;
 2719 
 2720                 cdm = &start_ccb->cdm;
 2721 
 2722                 /*
 2723                  * There are two ways of getting at information in the EDT.
 2724                  * The first way is via the primary EDT tree.  It starts
 2725                  * with a list of busses, then a list of targets on a bus,
 2726                  * then devices/luns on a target, and then peripherals on a
 2727                  * device/lun.  The second way is through the peripheral
 2728                  * driver lists, which are (naturally) organized by
 2729                  * peripheral driver.  It therefore makes sense to
 2730                  * use the peripheral driver list if the user is looking
 2731                  * for something like "da1", or all "da" devices.  If the
 2732                  * user is looking for something on a particular bus/target
 2733                  * or lun, it's generally better to go through the EDT tree.
 2734                  */
 2735 
 2736                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 2737                         position_type = cdm->pos.position_type;
 2738                 else {
 2739                         u_int i;
 2740 
 2741                         position_type = CAM_DEV_POS_NONE;
 2742 
 2743                         for (i = 0; i < cdm->num_patterns; i++) {
 2744                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 2745                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 2746                                         position_type = CAM_DEV_POS_EDT;
 2747                                         break;
 2748                                 }
 2749                         }
 2750 
 2751                         if (cdm->num_patterns == 0)
 2752                                 position_type = CAM_DEV_POS_EDT;
 2753                         else if (position_type == CAM_DEV_POS_NONE)
 2754                                 position_type = CAM_DEV_POS_PDRV;
 2755                 }
 2756 
 2757                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
 2758                 case CAM_DEV_POS_EDT:
 2759                         xptedtmatch(cdm);
 2760                         break;
 2761                 case CAM_DEV_POS_PDRV:
 2762                         xptperiphlistmatch(cdm);
 2763                         break;
 2764                 default:
 2765                         cdm->status = CAM_DEV_MATCH_ERROR;
 2766                         break;
 2767                 }
 2768 
 2769                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 2770                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2771                 else
 2772                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2773 
 2774                 break;
 2775         }
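#if 0
/*
 * A minimal userland sketch (not part of this file) of how XPT_DEV_MATCH
 * is typically reached: camcontrol(8)-style code fills a ccb_dev_match
 * and pushes it through the xpt(4) CAMIOCOMMAND ioctl.  The match buffer
 * size and the "/dev/xpt0" open are illustrative assumptions.
 */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <camlib.h>

static int
example_getdevtree(void)
{
	union ccb ccb;
	int fd;

	if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
		return (-1);
	bzero(&ccb, sizeof(ccb));
	ccb.ccb_h.func_code = XPT_DEV_MATCH;
	ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
	ccb.cdm.num_patterns = 0;	/* no patterns: walk the whole EDT */
	ccb.cdm.pattern_buf_len = 0;
	ccb.cdm.match_buf_len = 100 * sizeof(struct dev_match_result);
	ccb.cdm.matches = malloc(ccb.cdm.match_buf_len);
	if (ccb.cdm.matches == NULL || ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
		return (-1);
	printf("%u matches returned\n", ccb.cdm.num_matches);
	free(ccb.cdm.matches);
	close(fd);
	return (0);
}
#endif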
 2776         case XPT_SASYNC_CB:
 2777         {
 2778                 struct ccb_setasync *csa;
 2779                 struct async_node *cur_entry;
 2780                 struct async_list *async_head;
 2781                 u_int32_t added;
 2782 
 2783                 csa = &start_ccb->csa;
 2784                 added = csa->event_enable;
 2785                 async_head = &csa->ccb_h.path->device->asyncs;
 2786 
 2787                 /*
 2788                  * If there is already an entry for us, simply
 2789                  * update it.
 2790                  */
 2791                 cur_entry = SLIST_FIRST(async_head);
 2792                 while (cur_entry != NULL) {
 2793                         if ((cur_entry->callback_arg == csa->callback_arg)
 2794                          && (cur_entry->callback == csa->callback))
 2795                                 break;
 2796                         cur_entry = SLIST_NEXT(cur_entry, links);
 2797                 }
 2798 
 2799                 if (cur_entry != NULL) {
 2800                         /*
 2801                          * If the request has no flags set,
 2802                          * remove the entry.
 2803                          */
 2804                         added &= ~cur_entry->event_enable;
 2805                         if (csa->event_enable == 0) {
 2806                                 SLIST_REMOVE(async_head, cur_entry,
 2807                                              async_node, links);
 2808                                 csa->ccb_h.path->device->refcount--;
 2809                                 free(cur_entry, M_CAMXPT);
 2810                         } else {
 2811                                 cur_entry->event_enable = csa->event_enable;
 2812                         }
 2813                 } else {
 2814                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 2815                                            M_NOWAIT);
 2816                         if (cur_entry == NULL) {
 2817                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 2818                                 break;
 2819                         }
 2820                         cur_entry->event_enable = csa->event_enable;
 2821                         cur_entry->callback_arg = csa->callback_arg;
 2822                         cur_entry->callback = csa->callback;
 2823                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 2824                         csa->ccb_h.path->device->refcount++;
 2825                 }
 2826 
 2827                 /*
 2828                  * Need to decouple this operation via a taskqueue so that
 2829                  * the locking doesn't become a mess.
 2830                  */
 2831                 if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) {
 2832                         struct xpt_task *task;
 2833 
 2834                         task = malloc(sizeof(struct xpt_task), M_CAMXPT,
 2835                                       M_NOWAIT);
 2836                         if (task == NULL) {
 2837                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 2838                                 break;
 2839                         }
 2840 
 2841                         TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task);
 2842                         task->data1 = cur_entry;
 2843                         task->data2 = added;
 2844                         taskqueue_enqueue(taskqueue_thread, &task->task);
 2845                 }
 2846 
 2847                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2848                 break;
 2849         }
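#if 0
/*
 * A minimal sketch, under assumed names, of the peripheral-side use of
 * XPT_SASYNC_CB that this case services: "example_async_cb" is a
 * hypothetical callback and the event mask is illustrative.  The SIM
 * lock must be held, as for any xpt_action() call.
 */
static void example_async_cb(void *callback_arg, u_int32_t code,
			     struct cam_path *path, void *arg);

static void
example_register_async(struct cam_periph *periph)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_BUS_RESET;
	csa.callback = example_async_cb;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
	/* Passing event_enable == 0 later removes the registration. */
}
#endif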
 2850         case XPT_REL_SIMQ:
 2851         {
 2852                 struct ccb_relsim *crs;
 2853                 struct cam_ed *dev;
 2854 
 2855                 crs = &start_ccb->crs;
 2856                 dev = crs->ccb_h.path->device;
 2857                 if (dev == NULL) {
 2858 
 2859                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 2860                         break;
 2861                 }
 2862 
 2863                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 2864 
 2865                         if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
 2866                                 /* Don't ever go below one opening */
 2867                                 if (crs->openings > 0) {
 2868                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
 2869                                                             crs->openings);
 2870 
 2871                                         if (bootverbose) {
 2872                                                 xpt_print(crs->ccb_h.path,
 2873                                                     "tagged openings now %d\n",
 2874                                                     crs->openings);
 2875                                         }
 2876                                 }
 2877                         }
 2878                 }
 2879 
 2880                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 2881 
 2882                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 2883 
 2884                                 /*
 2885                                  * Just extend the old timeout and decrement
 2886                                  * the freeze count so that a single timeout
 2887                                  * is sufficient for releasing the queue.
 2888                                  */
 2889                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2890                                 callout_stop(&dev->callout);
 2891                         } else {
 2892 
 2893                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2894                         }
 2895 
 2896                         callout_reset(&dev->callout,
 2897                             (crs->release_timeout * hz) / 1000,
 2898                             xpt_release_devq_timeout, dev);
 2899 
 2900                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 2901 
 2902                 }
 2903 
 2904                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 2905 
 2906                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 2907                                 /*
 2908                                  * Decrement the freeze count so that a single
 2909                                  * completion is still sufficient to unfreeze
 2910                                  * the queue.
 2911                                  */
 2912                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2913                         } else {
 2914 
 2915                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 2916                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2917                         }
 2918                 }
 2919 
 2920                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 2921 
 2922                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 2923                          || (dev->ccbq.dev_active == 0)) {
 2924 
 2925                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2926                         } else {
 2927 
 2928                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 2929                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2930                         }
 2931                 }
 2932 
 2933                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 2934 
 2935                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
 2936                                          /*run_queue*/TRUE);
 2937                 }
 2938                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
 2939                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2940                 break;
 2941         }
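#if 0
/*
 * A minimal sketch of the usual client side of XPT_REL_SIMQ: resizing
 * the tagged openings for a device.  The opening count is an assumed
 * example value; per the code above, requests for zero openings are
 * ignored.
 */
static void
example_set_openings(struct cam_periph *periph, u_int32_t openings)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, periph->path, /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = openings;
	crs.release_timeout = 0;
	xpt_action((union ccb *)&crs);
	/* crs.qfrozen_cnt now reflects the device's freeze count. */
}
#endif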
 2942         case XPT_DEBUG: {
 2943 #ifdef CAMDEBUG
 2944 #ifdef CAM_DEBUG_DELAY
 2945                 cam_debug_delay = CAM_DEBUG_DELAY;
 2946 #endif
 2947                 cam_dflags = start_ccb->cdbg.flags;
 2948                 if (cam_dpath != NULL) {
 2949                         xpt_free_path(cam_dpath);
 2950                         cam_dpath = NULL;
 2951                 }
 2952 
 2953                 if (cam_dflags != CAM_DEBUG_NONE) {
 2954                         if (xpt_create_path(&cam_dpath, xpt_periph,
 2955                                             start_ccb->ccb_h.path_id,
 2956                                             start_ccb->ccb_h.target_id,
 2957                                             start_ccb->ccb_h.target_lun) !=
 2958                                             CAM_REQ_CMP) {
 2959                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2960                                 cam_dflags = CAM_DEBUG_NONE;
 2961                         } else {
 2962                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2963                                 xpt_print(cam_dpath, "debugging flags now %x\n",
 2964                                     cam_dflags);
 2965                         }
 2966                 } else {
 2967                         cam_dpath = NULL;
 2968                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2969                 }
 2970 #else /* !CAMDEBUG */
 2971                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 2972 #endif /* CAMDEBUG */
 2973                 break;
 2974         }
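/*
 * For reference: this case is normally driven from userland by
 * camcontrol(8), e.g. "camcontrol debug -I 0:3:0" to trace bus 0,
 * target 3, lun 0 (assuming a kernel built with options CAMDEBUG);
 * "camcontrol debug off" clears cam_dflags again.
 */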
 2975         case XPT_NOOP:
 2976                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 2977                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
 2978                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2979                 break;
 2980         default:
 2981         case XPT_SDEV_TYPE:
 2982         case XPT_TERM_IO:
 2983         case XPT_ENG_INQ:
 2984                 /* XXX Implement */
 2985                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 2986                 break;
 2987         }
 2988 }
 2989 
 2990 void
 2991 xpt_polled_action(union ccb *start_ccb)
 2992 {
 2993         u_int32_t timeout;
 2994         struct    cam_sim *sim;
 2995         struct    cam_devq *devq;
 2996         struct    cam_ed *dev;
 2997 
 2998 
 2999         timeout = start_ccb->ccb_h.timeout;
 3000         sim = start_ccb->ccb_h.path->bus->sim;
 3001         devq = sim->devq;
 3002         dev = start_ccb->ccb_h.path->device;
 3003 
 3004         mtx_assert(sim->mtx, MA_OWNED);
 3005 
 3006         /*
 3007          * Steal an opening so that no other queued requests
 3008          * can get it before us while we simulate interrupts.
 3009          */
 3010         dev->ccbq.devq_openings--;
 3011         dev->ccbq.dev_openings--;
 3012 
 3013         while(((devq != NULL && devq->send_openings <= 0) ||
 3014            dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
 3015                 DELAY(1000);
 3016                 (*(sim->sim_poll))(sim);
 3017                 camisr_runqueue(&sim->sim_doneq);
 3018         }
 3019 
 3020         dev->ccbq.devq_openings++;
 3021         dev->ccbq.dev_openings++;
 3022 
 3023         if (timeout != 0) {
 3024                 xpt_action(start_ccb);
 3025                 while(--timeout > 0) {
 3026                         (*(sim->sim_poll))(sim);
 3027                         camisr_runqueue(&sim->sim_doneq);
 3028                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 3029                             != CAM_REQ_INPROG)
 3030                                 break;
 3031                         DELAY(1000);
 3032                 }
 3033                 if (timeout == 0) {
 3034                         /*
 3035                          * XXX Is it worth adding a sim_timeout entry
 3036                          * point so we can attempt recovery?  If
 3037                          * this is only used for dumps, I don't think
 3038                          * it is.
 3039                          */
 3040                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3041                 }
 3042         } else {
 3043                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3044         }
 3045 }
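#if 0
/*
 * A minimal sketch of a polled request, of the kind issued while the
 * system dumps core and interrupt services are unavailable.  The TEST
 * UNIT READY payload is an illustrative choice; the SIM lock must be
 * held, and ccb_h.timeout (in ms) bounds the polling loops above.
 */
static void
example_polled_tur(struct cam_periph *periph)
{
	struct ccb_scsiio csio;

	xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
	scsi_test_unit_ready(&csio, /*retries*/0, /*cbfcnp*/NULL,
	    MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/5000);
	xpt_polled_action((union ccb *)&csio);
	if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		xpt_print(periph->path, "polled TUR failed\n");
}
#endif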
 3046 
 3047 /*
 3048  * Schedule a peripheral driver to receive a ccb when its
 3049  * target device has space for more transactions.
 3050  */
 3051 void
 3052 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 3053 {
 3054         struct cam_ed *device;
 3055         int runq;
 3056 
 3057         mtx_assert(perph->sim->mtx, MA_OWNED);
 3058 
 3059         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3060         device = perph->path->device;
 3061         if (periph_is_queued(perph)) {
 3062                 /* Simply reorder based on new priority */
 3063                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3064                           ("   change priority to %d\n", new_priority));
 3065                 if (new_priority < perph->pinfo.priority) {
 3066                         camq_change_priority(&device->drvq,
 3067                                              perph->pinfo.index,
 3068                                              new_priority);
 3069                 }
 3070                 runq = 0;
 3071         } else {
 3072                 /* New entry on the queue */
 3073                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3074                           ("   added periph to queue\n"));
 3075                 perph->pinfo.priority = new_priority;
 3076                 perph->pinfo.generation = ++device->drvq.generation;
 3077                 camq_insert(&device->drvq, &perph->pinfo);
 3078                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3079         }
 3080         if (runq != 0) {
 3081                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3082                           ("   calling xpt_run_dev_allocq\n"));
 3083                 xpt_run_dev_allocq(perph->path->bus);
 3084         }
 3085 }
 3086 
 3087 
 3088 /*
 3089  * Schedule a device to run on a given queue.
 3090  * If the device was inserted as a new entry on the queue,
 3091  * return 1 meaning the device queue should be run. If we
 3092  * were already queued, implying someone else has already
 3093  * started the queue, return 0 so the caller doesn't attempt
 3094  * to run the queue.
 3095  */
 3096 int
 3097 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3098                  u_int32_t new_priority)
 3099 {
 3100         int retval;
 3101         u_int32_t old_priority;
 3102 
 3103         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3104 
 3105         old_priority = pinfo->priority;
 3106 
 3107         /*
 3108          * Are we already queued?
 3109          */
 3110         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3111                 /* Simply reorder based on new priority */
 3112                 if (new_priority < old_priority) {
 3113                         camq_change_priority(queue, pinfo->index,
 3114                                              new_priority);
 3115                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3116                                         ("changed priority to %d\n",
 3117                                          new_priority));
 3118                 }
 3119                 retval = 0;
 3120         } else {
 3121                 /* New entry on the queue */
 3122                 if (new_priority < old_priority)
 3123                         pinfo->priority = new_priority;
 3124 
 3125                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3126                                 ("Inserting onto queue\n"));
 3127                 pinfo->generation = ++queue->generation;
 3128                 camq_insert(queue, pinfo);
 3129                 retval = 1;
 3130         }
 3131         return (retval);
 3132 }
 3133 
 3134 static void
 3135 xpt_run_dev_allocq(struct cam_eb *bus)
 3136 {
 3137         struct  cam_devq *devq;
 3138 
 3139         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3140         devq = bus->sim->devq;
 3141 
 3142         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3143                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3144                          "openings == %d, active == %d\n",
 3145                          devq->alloc_queue.qfrozen_cnt,
 3146                          devq->alloc_queue.entries,
 3147                          devq->alloc_openings,
 3148                          devq->alloc_active));
 3149 
 3150         devq->alloc_queue.qfrozen_cnt++;
 3151         while ((devq->alloc_queue.entries > 0)
 3152             && (devq->alloc_openings > 0)
 3153             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
 3154                 struct  cam_ed_qinfo *qinfo;
 3155                 struct  cam_ed *device;
 3156                 union   ccb *work_ccb;
 3157                 struct  cam_periph *drv;
 3158                 struct  camq *drvq;
 3159 
 3160                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3161                                                            CAMQ_HEAD);
 3162                 device = qinfo->device;
 3163 
 3164                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3165                                 ("running device %p\n", device));
 3166 
 3167                 drvq = &device->drvq;
 3168 
 3169 #ifdef CAMDEBUG
 3170                 if (drvq->entries <= 0) {
 3171                         panic("xpt_run_dev_allocq: "
 3172                               "Device on queue without any work to do");
 3173                 }
 3174 #endif
 3175                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3176                         devq->alloc_openings--;
 3177                         devq->alloc_active++;
 3178                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3179                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3180                                       drv->pinfo.priority);
 3181                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3182                                         ("calling periph start\n"));
 3183                         drv->periph_start(drv, work_ccb);
 3184                 } else {
 3185                         /*
 3186                          * Malloc failure in alloc_ccb
 3187                          */
 3188                         /*
 3189                          * XXX add us to a list to be run from free_ccb
 3190                          * if we don't have any ccbs active on this
 3191                          * device queue otherwise we may never get run
 3192                          * again.
 3193                          */
 3194                         break;
 3195                 }
 3196 
 3197                 if (drvq->entries > 0) {
 3198                         /* We have more work.  Attempt to reschedule */
 3199                         xpt_schedule_dev_allocq(bus, device);
 3200                 }
 3201         }
 3202         devq->alloc_queue.qfrozen_cnt--;
 3203 }
 3204 
 3205 void
 3206 xpt_run_dev_sendq(struct cam_eb *bus)
 3207 {
 3208         struct  cam_devq *devq;
 3209 
 3210         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3211 
 3212         devq = bus->sim->devq;
 3213 
 3214         devq->send_queue.qfrozen_cnt++;
 3215         while ((devq->send_queue.entries > 0)
 3216             && (devq->send_openings > 0)) {
 3217                 struct  cam_ed_qinfo *qinfo;
 3218                 struct  cam_ed *device;
 3219                 union ccb *work_ccb;
 3220                 struct  cam_sim *sim;
 3221 
 3222                 if (devq->send_queue.qfrozen_cnt > 1) {
 3223                         break;
 3224                 }
 3225 
 3226                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3227                                                            CAMQ_HEAD);
 3228                 device = qinfo->device;
 3229 
 3230                 /*
 3231                  * If the device has been "frozen", don't attempt
 3232                  * to run it.
 3233                  */
 3234                 if (device->qfrozen_cnt > 0) {
 3235                         continue;
 3236                 }
 3237 
 3238                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3239                                 ("running device %p\n", device));
 3240 
 3241                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3242                 if (work_ccb == NULL) {
 3243                         printf("device on run queue with no ccbs???\n");
 3244                         continue;
 3245                 }
 3246 
 3247                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3248 
 3249                         mtx_lock(&xsoftc.xpt_lock);
 3250                         if (xsoftc.num_highpower <= 0) {
 3251                                 /*
 3252                                  * We got a high power command, but we
 3253                                  * don't have any available slots.  Freeze
 3254                                  * the device queue until we have a slot
 3255                                  * available.
 3256                                  */
 3257                                 device->qfrozen_cnt++;
 3258                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
 3259                                                    &work_ccb->ccb_h,
 3260                                                    xpt_links.stqe);
 3261 
 3262                                 mtx_unlock(&xsoftc.xpt_lock);
 3263                                 continue;
 3264                         } else {
 3265                                 /*
 3266                                  * Consume a high power slot while
 3267                                  * this ccb runs.
 3268                                  */
 3269                                 xsoftc.num_highpower--;
 3270                         }
 3271                         mtx_unlock(&xsoftc.xpt_lock);
 3272                 }
 3273                 devq->active_dev = device;
 3274                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3275 
 3276                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3277 
 3278                 devq->send_openings--;
 3279                 devq->send_active++;
 3280 
 3281                 if (device->ccbq.queue.entries > 0)
 3282                         xpt_schedule_dev_sendq(bus, device);
 3283 
 3284                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
 3285                         /*
 3286                          * The client wants to freeze the queue
 3287                          * after this CCB is sent.
 3288                          */
 3289                         device->qfrozen_cnt++;
 3290                 }
 3291 
 3292                 /* In Target mode, the peripheral driver knows best... */
 3293                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3294                         if ((device->inq_flags & SID_CmdQue) != 0
 3295                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3296                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3297                         else
 3298                                 /*
 3299                                  * Clear this in case of a retried CCB that
 3300                                  * failed due to a rejected tag.
 3301                                  */
 3302                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3303                 }
 3304 
 3305                 /*
 3306                  * Device queues can be shared among multiple sim instances
 3307                  * that reside on different busses.  Use the SIM in the queue
 3308                  * CCB's path, rather than the one in the bus that was passed
 3309                  * into this function.
 3310                  */
 3311                 sim = work_ccb->ccb_h.path->bus->sim;
 3312                 (*(sim->sim_action))(sim, work_ccb);
 3313 
 3314                 devq->active_dev = NULL;
 3315         }
 3316         devq->send_queue.qfrozen_cnt--;
 3317 }
 3318 
 3319 /*
 3320  * This function merges fields from the slave ccb into the master ccb, while
 3321  * keeping important fields in the master ccb constant.
 3322  */
 3323 void
 3324 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3325 {
 3326 
 3327         /*
 3328          * Pull fields that are valid for peripheral drivers to set
 3329          * into the master CCB along with the CCB "payload".
 3330          */
 3331         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3332         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3333         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3334         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 3335         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3336               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3337 }
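#if 0
/*
 * A minimal sketch of xpt_merge_ccb() use, assuming a hypothetical
 * recovery path: a request is built in a scratch CCB and then merged
 * into a CCB that is already linked into the queues, so the master's
 * path and queue bookkeeping survive untouched.
 */
static void
example_merge(union ccb *queued_ccb)
{
	union ccb scratch;

	memset(&scratch, 0, sizeof(scratch));
	xpt_setup_ccb(&scratch.ccb_h, queued_ccb->ccb_h.path, /*priority*/1);
	scratch.ccb_h.func_code = XPT_SCSI_IO;
	/* ... fill in the scsiio payload here ... */
	xpt_merge_ccb(queued_ccb, &scratch);
}
#endif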
 3338 
 3339 void
 3340 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3341 {
 3342 
 3343         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3344         ccb_h->pinfo.priority = priority;
 3345         ccb_h->path = path;
 3346         ccb_h->path_id = path->bus->path_id;
 3347         if (path->target)
 3348                 ccb_h->target_id = path->target->target_id;
 3349         else
 3350                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3351         if (path->device) {
 3352                 ccb_h->target_lun = path->device->lun_id;
 3353                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3354         } else {
 3355                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3356         }
 3357         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3358         ccb_h->flags = 0;
 3359 }
 3360 
 3361 /* Path manipulation functions */
 3362 cam_status
 3363 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3364                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3365 {
 3366         struct     cam_path *path;
 3367         cam_status status;
 3368 
 3369         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 3370 
 3371         if (path == NULL) {
 3372                 status = CAM_RESRC_UNAVAIL;
 3373                 return(status);
 3374         }
 3375         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3376         if (status != CAM_REQ_CMP) {
 3377                 free(path, M_CAMXPT);
 3378                 path = NULL;
 3379         }
 3380         *new_path_ptr = path;
 3381         return (status);
 3382 }
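#if 0
/*
 * A minimal sketch of the create/use/free life cycle for a path.  The
 * caller is expected to hold the owning SIM's lock;
 * xpt_create_path_unlocked() below is the variant for callers that do
 * not.
 */
static cam_status
example_path_use(path_id_t bus, target_id_t target, lun_id_t lun)
{
	struct cam_path *path;
	cam_status status;

	status = xpt_create_path(&path, /*periph*/NULL, bus, target, lun);
	if (status != CAM_REQ_CMP)
		return (status);
	xpt_print(path, "path created\n");
	xpt_free_path(path);
	return (CAM_REQ_CMP);
}
#endif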
 3383 
 3384 cam_status
 3385 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
 3386                          struct cam_periph *periph, path_id_t path_id,
 3387                          target_id_t target_id, lun_id_t lun_id)
 3388 {
 3389         struct     cam_path *path;
 3390         struct     cam_eb *bus = NULL;
 3391         cam_status status;
 3392         int        need_unlock = 0;
 3393 
 3394         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK);
 3395 
 3396         if (path_id != CAM_BUS_WILDCARD) {
 3397                 bus = xpt_find_bus(path_id);
 3398                 if (bus != NULL) {
 3399                         need_unlock = 1;
 3400                         CAM_SIM_LOCK(bus->sim);
 3401                 }
 3402         }
 3403         status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
 3404         if (need_unlock)
 3405                 CAM_SIM_UNLOCK(bus->sim);
 3406         if (status != CAM_REQ_CMP) {
 3407                 free(path, M_CAMXPT);
 3408                 path = NULL;
 3409         }
 3410         *new_path_ptr = path;
 3411         return (status);
 3412 }
 3413 
 3414 cam_status
 3415 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 3416                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3417 {
 3418         struct       cam_eb *bus;
 3419         struct       cam_et *target;
 3420         struct       cam_ed *device;
 3421         cam_status   status;
 3422 
 3423         status = CAM_REQ_CMP;   /* Completed without error */
 3424         target = NULL;          /* Wildcarded */
 3425         device = NULL;          /* Wildcarded */
 3426 
 3427         /*
 3428          * We will potentially modify the EDT, so block interrupts
 3429          * that may attempt to create cam paths.
 3430          */
 3431         bus = xpt_find_bus(path_id);
 3432         if (bus == NULL) {
 3433                 status = CAM_PATH_INVALID;
 3434         } else {
 3435                 target = xpt_find_target(bus, target_id);
 3436                 if (target == NULL) {
 3437                         /* Create one */
 3438                         struct cam_et *new_target;
 3439 
 3440                         new_target = xpt_alloc_target(bus, target_id);
 3441                         if (new_target == NULL) {
 3442                                 status = CAM_RESRC_UNAVAIL;
 3443                         } else {
 3444                                 target = new_target;
 3445                         }
 3446                 }
 3447                 if (target != NULL) {
 3448                         device = xpt_find_device(target, lun_id);
 3449                         if (device == NULL) {
 3450                                 /* Create one */
 3451                                 struct cam_ed *new_device;
 3452 
 3453                                 new_device =
 3454                                     (*(bus->xport->alloc_device))(bus,
 3455                                                                       target,
 3456                                                                       lun_id);
 3457                                 if (new_device == NULL) {
 3458                                         status = CAM_RESRC_UNAVAIL;
 3459                                 } else {
 3460                                         device = new_device;
 3461                                 }
 3462                         }
 3463                 }
 3464         }
 3465 
 3466         /*
 3467          * Only touch the user's data if we are successful.
 3468          */
 3469         if (status == CAM_REQ_CMP) {
 3470                 new_path->periph = perph;
 3471                 new_path->bus = bus;
 3472                 new_path->target = target;
 3473                 new_path->device = device;
 3474                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 3475         } else {
 3476                 if (device != NULL)
 3477                         xpt_release_device(bus, target, device);
 3478                 if (target != NULL)
 3479                         xpt_release_target(bus, target);
 3480                 if (bus != NULL)
 3481                         xpt_release_bus(bus);
 3482         }
 3483         return (status);
 3484 }
 3485 
 3486 void
 3487 xpt_release_path(struct cam_path *path)
 3488 {
 3489         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 3490         if (path->device != NULL) {
 3491                 xpt_release_device(path->bus, path->target, path->device);
 3492                 path->device = NULL;
 3493         }
 3494         if (path->target != NULL) {
 3495                 xpt_release_target(path->bus, path->target);
 3496                 path->target = NULL;
 3497         }
 3498         if (path->bus != NULL) {
 3499                 xpt_release_bus(path->bus);
 3500                 path->bus = NULL;
 3501         }
 3502 }
 3503 
 3504 void
 3505 xpt_free_path(struct cam_path *path)
 3506 {
 3507 
 3508         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 3509         xpt_release_path(path);
 3510         free(path, M_CAMXPT);
 3511 }
 3512 
 3513 
 3514 /*
 3515  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 3516  * in path1, 2 for match with wildcards in path2.
 3517  */
 3518 int
 3519 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 3520 {
 3521         int retval = 0;
 3522 
 3523         if (path1->bus != path2->bus) {
 3524                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 3525                         retval = 1;
 3526                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 3527                         retval = 2;
 3528                 else
 3529                         return (-1);
 3530         }
 3531         if (path1->target != path2->target) {
 3532                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 3533                         if (retval == 0)
 3534                                 retval = 1;
 3535                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 3536                         retval = 2;
 3537                 else
 3538                         return (-1);
 3539         }
 3540         if (path1->device != path2->device) {
 3541                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 3542                         if (retval == 0)
 3543                                 retval = 1;
 3544                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 3545                         retval = 2;
 3546                 else
 3547                         return (-1);
 3548         }
 3549         return (retval);
 3550 }
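/*
 * For example: comparing a fully wildcarded path against a fully
 * specified one yields 1; with the arguments swapped it yields 2; two
 * different fully specified paths yield -1; identical paths yield 0.
 */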
 3551 
 3552 void
 3553 xpt_print_path(struct cam_path *path)
 3554 {
 3555 
 3556         if (path == NULL)
 3557                 printf("(nopath): ");
 3558         else {
 3559                 if (path->periph != NULL)
 3560                         printf("(%s%d:", path->periph->periph_name,
 3561                                path->periph->unit_number);
 3562                 else
 3563                         printf("(noperiph:");
 3564 
 3565                 if (path->bus != NULL)
 3566                         printf("%s%d:%d:", path->bus->sim->sim_name,
 3567                                path->bus->sim->unit_number,
 3568                                path->bus->sim->bus_id);
 3569                 else
 3570                         printf("nobus:");
 3571 
 3572                 if (path->target != NULL)
 3573                         printf("%d:", path->target->target_id);
 3574                 else
 3575                         printf("X:");
 3576 
 3577                 if (path->device != NULL)
 3578                         printf("%d): ", path->device->lun_id);
 3579                 else
 3580                         printf("X): ");
 3581         }
 3582 }
 3583 
 3584 void
 3585 xpt_print(struct cam_path *path, const char *fmt, ...)
 3586 {
 3587         va_list ap;
 3588         xpt_print_path(path);
 3589         va_start(ap, fmt);
 3590         vprintf(fmt, ap);
 3591         va_end(ap);
 3592 }
 3593 
 3594 int
 3595 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 3596 {
 3597         struct sbuf sb;
 3598 
 3599 #ifdef INVARIANTS
 3600         if (path != NULL && path->bus != NULL)
 3601                 mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3602 #endif
 3603 
 3604         sbuf_new(&sb, str, str_len, 0);
 3605 
 3606         if (path == NULL)
 3607                 sbuf_printf(&sb, "(nopath): ");
 3608         else {
 3609                 if (path->periph != NULL)
 3610                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 3611                                     path->periph->unit_number);
 3612                 else
 3613                         sbuf_printf(&sb, "(noperiph:");
 3614 
 3615                 if (path->bus != NULL)
 3616                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 3617                                     path->bus->sim->unit_number,
 3618                                     path->bus->sim->bus_id);
 3619                 else
 3620                         sbuf_printf(&sb, "nobus:");
 3621 
 3622                 if (path->target != NULL)
 3623                         sbuf_printf(&sb, "%d:", path->target->target_id);
 3624                 else
 3625                         sbuf_printf(&sb, "X:");
 3626 
 3627                 if (path->device != NULL)
 3628                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
 3629                 else
 3630                         sbuf_printf(&sb, "X): ");
 3631         }
 3632         sbuf_finish(&sb);
 3633 
 3634         return(sbuf_len(&sb));
 3635 }
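/*
 * For example, a fully specified path prints as "(da0:ahc0:0:3:0): ",
 * i.e. (periph:sim:bus:target:lun), with "X" standing in for any
 * wildcarded target or lun component.
 */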
 3636 
 3637 path_id_t
 3638 xpt_path_path_id(struct cam_path *path)
 3639 {
 3640         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3641 
 3642         return(path->bus->path_id);
 3643 }
 3644 
 3645 target_id_t
 3646 xpt_path_target_id(struct cam_path *path)
 3647 {
 3648         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3649 
 3650         if (path->target != NULL)
 3651                 return (path->target->target_id);
 3652         else
 3653                 return (CAM_TARGET_WILDCARD);
 3654 }
 3655 
 3656 lun_id_t
 3657 xpt_path_lun_id(struct cam_path *path)
 3658 {
 3659         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3660 
 3661         if (path->device != NULL)
 3662                 return (path->device->lun_id);
 3663         else
 3664                 return (CAM_LUN_WILDCARD);
 3665 }
 3666 
 3667 struct cam_sim *
 3668 xpt_path_sim(struct cam_path *path)
 3669 {
 3670 
 3671         return (path->bus->sim);
 3672 }
 3673 
 3674 struct cam_periph*
 3675 xpt_path_periph(struct cam_path *path)
 3676 {
 3677         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3678 
 3679         return (path->periph);
 3680 }
 3681 
 3682 /*
 3683  * Release a CAM control block for the caller.  Remit the cost of the structure
 3684  * to the device referenced by the path.  If this device had no 'credits'
 3685  * and peripheral drivers have registered async callbacks for this notification,
 3686  * call them now.
 3687  */
 3688 void
 3689 xpt_release_ccb(union ccb *free_ccb)
 3690 {
 3691         struct   cam_path *path;
 3692         struct   cam_ed *device;
 3693         struct   cam_eb *bus;
 3694         struct   cam_sim *sim;
 3695 
 3696         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 3697         path = free_ccb->ccb_h.path;
 3698         device = path->device;
 3699         bus = path->bus;
 3700         sim = bus->sim;
 3701 
 3702         mtx_assert(sim->mtx, MA_OWNED);
 3703 
 3704         cam_ccbq_release_opening(&device->ccbq);
 3705         if (sim->ccb_count > sim->max_ccbs) {
 3706                 xpt_free_ccb(free_ccb);
 3707                 sim->ccb_count--;
 3708         } else {
 3709                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
 3710                     xpt_links.sle);
 3711         }
 3712         if (sim->devq == NULL) {
 3713                 return;
 3714         }
 3715         sim->devq->alloc_openings++;
 3716         sim->devq->alloc_active--;
 3717         /* XXX Turn this into an inline function - xpt_run_device?? */
 3718         if ((device_is_alloc_queued(device) == 0)
 3719          && (device->drvq.entries > 0)) {
 3720                 xpt_schedule_dev_allocq(bus, device);
 3721         }
 3722         if (dev_allocq_is_runnable(sim->devq))
 3723                 xpt_run_dev_allocq(bus);
 3724 }
 3725 
 3726 /* Functions accessed by SIM drivers */
 3727 
 3728 static struct xpt_xport xport_default = {
 3729         .alloc_device = xpt_alloc_device_default,
 3730         .action = xpt_action_default,
 3731         .async = xpt_dev_async_default,
 3732 };
 3733 
 3734 /*
 3735  * A sim structure, listing the SIM entry points and instance
 3736  * identification info, is passed to xpt_bus_register to hook the SIM
 3737  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 3738  * for this new bus, places it in the list of busses, and assigns
 3739  * it a path_id.
 3740  * information specified by the user.  Once interrupt services are
 3741  * available, the bus will be probed.
 3742  */
 3743 int32_t
 3744 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
 3745 {
 3746         struct cam_eb *new_bus;
 3747         struct cam_eb *old_bus;
 3748         struct ccb_pathinq cpi;
 3749         struct cam_path path;
 3750         cam_status status;
 3751 
 3752         mtx_assert(sim->mtx, MA_OWNED);
 3753 
 3754         sim->bus_id = bus;
 3755         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 3756                                           M_CAMXPT, M_NOWAIT);
 3757         if (new_bus == NULL) {
 3758                 /* Couldn't satisfy request */
 3759                 return (CAM_RESRC_UNAVAIL);
 3760         }
 3761 
 3762         if (strcmp(sim->sim_name, "xpt") != 0) {
 3763                 sim->path_id =
 3764                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 3765         }
 3766 
 3767         TAILQ_INIT(&new_bus->et_entries);
 3768         new_bus->path_id = sim->path_id;
 3769         cam_sim_hold(sim);
 3770         new_bus->sim = sim;
 3771         timevalclear(&new_bus->last_reset);
 3772         new_bus->flags = 0;
 3773         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 3774         new_bus->generation = 0;
 3775 
 3776         mtx_lock(&xsoftc.xpt_topo_lock);
 3777         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3778         while (old_bus != NULL
 3779             && old_bus->path_id < new_bus->path_id)
 3780                 old_bus = TAILQ_NEXT(old_bus, links);
 3781         if (old_bus != NULL)
 3782                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 3783         else
 3784                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
 3785         xsoftc.bus_generation++;
 3786         mtx_unlock(&xsoftc.xpt_topo_lock);
 3787 
 3788         /*
 3789          * Set a default transport so that a PATH_INQ can be issued to
 3790          * the SIM.  This will then allow for probing and attaching of
 3791          * a more appropriate transport.
 3792          */
 3793         new_bus->xport = &xport_default;
 3794 
 3795         bzero(&path, sizeof(path));
 3796         status = xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
 3797                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3798         if (status != CAM_REQ_CMP)
 3799                 printf("xpt_compile_path returned %d\n", status);
 3800 
 3801         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 3802         cpi.ccb_h.func_code = XPT_PATH_INQ;
 3803         xpt_action((union ccb *)&cpi);
 3804 
 3805         if (cpi.ccb_h.status == CAM_REQ_CMP) {
 3806                 switch (cpi.transport) {
 3807                 case XPORT_SPI:
 3808                 case XPORT_SAS:
 3809                 case XPORT_FC:
 3810                 case XPORT_USB:
 3811                 case XPORT_ISCSI:
 3812                 case XPORT_PPB:
 3813                         new_bus->xport = scsi_get_xport();
 3814                         break;
 3815                 case XPORT_ATA:
 3816                 case XPORT_SATA:
 3817                         new_bus->xport = ata_get_xport();
 3818                         break;
 3819                 default:
 3820                         new_bus->xport = &xport_default;
 3821                         break;
 3822                 }
 3823         }
 3824 
 3825         /* Notify interested parties */
 3826         if (sim->path_id != CAM_XPT_PATH_ID) {
 3827                 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
 3828         }
 3829         xpt_release_path(&path);
 3830         return (CAM_SUCCESS);
 3831 }
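#if 0
/*
 * A minimal sketch, under assumed names, of the SIM attach sequence
 * that ends here: "mysim_action"/"mysim_poll" and the opening counts
 * are hypothetical, and error handling is abbreviated.
 */
static void mysim_action(struct cam_sim *sim, union ccb *ccb);
static void mysim_poll(struct cam_sim *sim);

static int
example_sim_attach(device_t dev, void *softc, struct mtx *lock)
{
	struct cam_devq *devq;
	struct cam_sim *sim;

	if ((devq = cam_simq_alloc(/*max_sim_transactions*/32)) == NULL)
		return (ENOMEM);
	sim = cam_sim_alloc(mysim_action, mysim_poll, "mysim", softc,
	    device_get_unit(dev), lock, /*max_dev_transactions*/1,
	    /*max_tagged_dev_transactions*/32, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return (ENOMEM);
	}
	mtx_lock(lock);
	if (xpt_bus_register(sim, dev, /*bus*/0) != CAM_SUCCESS) {
		cam_sim_free(sim, /*free_devq*/TRUE);
		mtx_unlock(lock);
		return (ENXIO);
	}
	mtx_unlock(lock);
	return (0);
}
#endif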
 3832 
 3833 int32_t
 3834 xpt_bus_deregister(path_id_t pathid)
 3835 {
 3836         struct cam_path bus_path;
 3837         cam_status status;
 3838 
 3839         status = xpt_compile_path(&bus_path, NULL, pathid,
 3840                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3841         if (status != CAM_REQ_CMP)
 3842                 return (status);
 3843 
 3844         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 3845         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 3846 
 3847         /* Release the reference count held while registered. */
 3848         xpt_release_bus(bus_path.bus);
 3849         xpt_release_path(&bus_path);
 3850 
 3851         return (CAM_REQ_CMP);
 3852 }
 3853 
 3854 static path_id_t
 3855 xptnextfreepathid(void)
 3856 {
 3857         struct cam_eb *bus;
 3858         path_id_t pathid;
 3859         const char *strval;
 3860 
 3861         pathid = 0;
 3862         mtx_lock(&xsoftc.xpt_topo_lock);
 3863         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3864 retry:
 3865         /* Find an unoccupied pathid */
 3866         while (bus != NULL && bus->path_id <= pathid) {
 3867                 if (bus->path_id == pathid)
 3868                         pathid++;
 3869                 bus = TAILQ_NEXT(bus, links);
 3870         }
 3871         mtx_unlock(&xsoftc.xpt_topo_lock);
 3872 
 3873         /*
 3874          * Ensure that this pathid is not reserved for
 3875          * a bus that may be registered in the future.
 3876          */
 3877         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 3878                 ++pathid;
 3879                 /* Start the search over */
 3880                 mtx_lock(&xsoftc.xpt_topo_lock);
 3881                 goto retry;
 3882         }
 3883         return (pathid);
 3884 }
 3885 
 3886 static path_id_t
 3887 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 3888 {
 3889         path_id_t pathid;
 3890         int i, dunit, val;
 3891         char buf[32];
 3892         const char *dname;
 3893 
 3894         pathid = CAM_XPT_PATH_ID;
 3895         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 3896         i = 0;
 3897         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 3898                 if (strcmp(dname, "scbus")) {
 3899                         /* Avoid a bit of foot shooting. */
 3900                         continue;
 3901                 }
 3902                 if (dunit < 0)          /* unwired?! */
 3903                         continue;
 3904                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 3905                         if (sim_bus == val) {
 3906                                 pathid = dunit;
 3907                                 break;
 3908                         }
 3909                 } else if (sim_bus == 0) {
 3910                         /* Unspecified matches bus 0 */
 3911                         pathid = dunit;
 3912                         break;
 3913                 } else {
 3914                         printf("Ambiguous scbus configuration for %s%d "
 3915                                "bus %d, cannot wire down.  The kernel "
 3916                                "config entry for scbus%d should "
 3917                                "specify a controller bus.\n"
 3918                                "Scbus will be assigned dynamically.\n",
 3919                                sim_name, sim_unit, sim_bus, dunit);
 3920                         break;
 3921                 }
 3922         }
 3923 
 3924         if (pathid == CAM_XPT_PATH_ID)
 3925                 pathid = xptnextfreepathid();
 3926         return (pathid);
 3927 }
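/*
 * For reference: the "scbus" resources consulted above come from the
 * kernel configuration (e.g. "device scbus0 at ahc0 bus 0") or from
 * /boot/device.hints:
 *
 *	hint.scbus.0.at="ahc0"
 *	hint.scbus.0.bus="0"
 *
 * which wires path_id 0 to bus 0 of the ahc0 controller.
 */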
 3928 
 3929 void
 3930 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 3931 {
 3932         struct cam_eb *bus;
 3933         struct cam_et *target, *next_target;
 3934         struct cam_ed *device, *next_device;
 3935 
 3936         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3937 
 3938         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
 3939 
 3940         /*
 3941          * Most async events come from a CAM interrupt context.  In
 3942          * a few cases, the error recovery code at the peripheral layer,
 3943          * which may run from our SWI or a process context, may signal
 3944          * deferred events with a call to xpt_async.
 3945          */
 3946 
 3947         bus = path->bus;
 3948 
 3949         if (async_code == AC_BUS_RESET) {
 3950                 /* Update our notion of when the last reset occurred */
 3951                 microtime(&bus->last_reset);
 3952         }
 3953 
 3954         for (target = TAILQ_FIRST(&bus->et_entries);
 3955              target != NULL;
 3956              target = next_target) {
 3957 
 3958                 next_target = TAILQ_NEXT(target, links);
 3959 
 3960                 if (path->target != target
 3961                  && path->target->target_id != CAM_TARGET_WILDCARD
 3962                  && target->target_id != CAM_TARGET_WILDCARD)
 3963                         continue;
 3964 
 3965                 if (async_code == AC_SENT_BDR) {
 3966                         /* Update our notion of when the last reset occurred */
 3967                         microtime(&path->target->last_reset);
 3968                 }
 3969 
 3970                 for (device = TAILQ_FIRST(&target->ed_entries);
 3971                      device != NULL;
 3972                      device = next_device) {
 3973 
 3974                         next_device = TAILQ_NEXT(device, links);
 3975 
 3976                         if (path->device != device
 3977                          && path->device->lun_id != CAM_LUN_WILDCARD
 3978                          && device->lun_id != CAM_LUN_WILDCARD)
 3979                                 continue;
 3980 
 3981                         (*(bus->xport->async))(async_code, bus,
 3982                                                target, device,
 3983                                                async_arg);
 3984 
 3985                         xpt_async_bcast(&device->asyncs, async_code,
 3986                                         path, async_arg);
 3987                 }
 3988         }
 3989 
 3990         /*
 3991          * If this wasn't a fully wildcarded async, tell all
 3992          * clients that want all async events.
 3993          */
 3994         if (bus != xpt_periph->path->bus)
 3995                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 3996                                 path, async_arg);
 3997 }
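
      /*
       * Illustrative sketch, not part of the original file: how a SIM
       * driver might report a bus reset it has detected.  The path is
       * assumed to be a wildcard path for the bus, built at attach time
       * with xpt_create_path(), and the SIM lock must be held, per the
       * mtx_assert() in xpt_async() above.  foo_report_bus_reset() is a
       * hypothetical name.
       */
      #if 0
      static void
      foo_report_bus_reset(struct cam_path *bus_path)
      {
              /* Notifies every async client registered on this bus. */
              xpt_async(AC_BUS_RESET, bus_path, NULL);
      }
      #endif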
 3998 
 3999 static void
 4000 xpt_async_bcast(struct async_list *async_head,
 4001                 u_int32_t async_code,
 4002                 struct cam_path *path, void *async_arg)
 4003 {
 4004         struct async_node *cur_entry;
 4005 
 4006         cur_entry = SLIST_FIRST(async_head);
 4007         while (cur_entry != NULL) {
 4008                 struct async_node *next_entry;
 4009                 /*
 4010                  * Grab the next list entry before we call the current
 4011                  * entry's callback.  This is because the callback function
 4012                  * can delete its async callback entry.
 4013                  */
 4014                 next_entry = SLIST_NEXT(cur_entry, links);
 4015                 if ((cur_entry->event_enable & async_code) != 0)
 4016                         cur_entry->callback(cur_entry->callback_arg,
 4017                                             async_code, path,
 4018                                             async_arg);
 4019                 cur_entry = next_entry;
 4020         }
 4021 }
 4022 
 4023 static void
 4024 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
 4025                       struct cam_et *target, struct cam_ed *device,
 4026                       void *async_arg)
 4027 {
 4028         printf("xpt_dev_async_default called\n");
 4029 }
 4030 
 4031 u_int32_t
 4032 xpt_freeze_devq(struct cam_path *path, u_int count)
 4033 {
 4034         struct ccb_hdr *ccbh;
 4035 
 4036         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4037 
 4038         path->device->qfrozen_cnt += count;
 4039 
 4040         /*
 4041          * Mark the last CCB in the queue as needing
 4042          * to be requeued if the driver hasn't
 4043          * changed its state yet.  This fixes a race
 4044          * where a CCB is just about to be queued to
 4045          * a controller driver when its interrupt routine
 4046          * freezes the queue.  To completely close the
 4047          * hole, controller drivers must check to see
 4048          * if a CCB's status is still CAM_REQ_INPROG
 4049          * just before they queue
 4050          * the CCB.  See ahc_action/ahc_freeze_devq for
 4051          * an example.
 4052          */
 4053         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
 4054         if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4055                 ccbh->status = CAM_REQUEUE_REQ;
 4056         return (path->device->qfrozen_cnt);
 4057 }
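
      /*
       * Illustrative sketch, not from this file: a controller driver
       * closing the race described above by re-checking the CCB status
       * immediately before handing it to the hardware.  foo_action()
       * and foo_hw_submit() are hypothetical names; ahc_action() in the
       * real tree follows the same pattern.
       */
      #if 0
      static void
      foo_action(struct cam_sim *sim, union ccb *ccb)
      {
              /*
               * xpt_freeze_devq() may have changed the status to
               * CAM_REQUEUE_REQ from interrupt context after this CCB
               * was dequeued but before it reached us.
               */
              if (ccb->ccb_h.status != CAM_REQ_INPROG) {
                      xpt_done(ccb);          /* Let the XPT requeue it. */
                      return;
              }
              foo_hw_submit(ccb);             /* Hypothetical HW submit. */
      }
      #endif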
 4058 
 4059 u_int32_t
 4060 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4061 {
 4062         mtx_assert(sim->mtx, MA_OWNED);
 4063 
 4064         sim->devq->send_queue.qfrozen_cnt += count;
 4065         if (sim->devq->active_dev != NULL) {
 4066                 struct ccb_hdr *ccbh;
 4067 
 4068                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
 4069                                   ccb_hdr_tailq);
 4070                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4071                         ccbh->status = CAM_REQUEUE_REQ;
 4072         }
 4073         return (sim->devq->send_queue.qfrozen_cnt);
 4074 }
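
      /*
       * Illustrative sketch, an assumption rather than code from this
       * file: the freeze/release pairing a SIM typically uses when it
       * runs out of hardware resources.  foo_action() and foo_intr()
       * are hypothetical contexts.
       */
      #if 0
              /* In foo_action(), with no hardware command slots left: */
              xpt_freeze_simq(sim, /*count*/1);
              ccb->ccb_h.status = CAM_REQUEUE_REQ;
              xpt_done(ccb);

              /* Later, in foo_intr(), once a slot frees up: */
              xpt_release_simq(sim, /*run_queue*/TRUE);
      #endif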
 4075 
 4076 static void
 4077 xpt_release_devq_timeout(void *arg)
 4078 {
 4079         struct cam_ed *device;
 4080 
 4081         device = (struct cam_ed *)arg;
 4082 
 4083         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
 4084 }
 4085 
 4086 void
 4087 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4088 {
 4089         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4090 
 4091         xpt_release_devq_device(path->device, count, run_queue);
 4092 }
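
      /*
       * Illustrative sketch (assumption): the matching freeze/release
       * discipline a peripheral's error-recovery code might follow,
       * where "path" is the device's path and the SIM lock is held.
       */
      #if 0
              xpt_freeze_devq(path, /*count*/1);
              /* ... issue recovery CCBs at elevated priority ... */
              xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
      #endif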
 4093 
 4094 static void
 4095 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
 4096 {
 4097         int     rundevq;
 4098 
 4099         rundevq = 0;
 4100         if (dev->qfrozen_cnt > 0) {
 4101 
 4102                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
 4103                 dev->qfrozen_cnt -= count;
 4104                 if (dev->qfrozen_cnt == 0) {
 4105 
 4106                         /*
 4107                          * No longer need to wait for a successful
 4108                          * command completion.
 4109                          */
 4110                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4111 
 4112                         /*
 4113                          * Remove any timeouts that might be scheduled
 4114                          * to release this queue.
 4115                          */
 4116                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4117                                 callout_stop(&dev->callout);
 4118                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4119                         }
 4120 
 4121                         /*
 4122                          * Now that we are unfrozen, schedule the
 4123                          * device so any pending transactions are
 4124                          * run.
 4125                          */
 4126                         if ((dev->ccbq.queue.entries > 0)
 4127                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4128                          && (run_queue != 0)) {
 4129                                 rundevq = 1;
 4130                         }
 4131                 }
 4132         }
 4133         if (rundevq != 0)
 4134                 xpt_run_dev_sendq(dev->target->bus);
 4135 }
 4136 
 4137 void
 4138 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4139 {
 4140         struct  camq *sendq;
 4141 
 4142         mtx_assert(sim->mtx, MA_OWNED);
 4143 
 4144         sendq = &(sim->devq->send_queue);
 4145         if (sendq->qfrozen_cnt > 0) {
 4146 
 4147                 sendq->qfrozen_cnt--;
 4148                 if (sendq->qfrozen_cnt == 0) {
 4149                         struct cam_eb *bus;
 4150 
 4151                         /*
 4152                          * If there is a timeout scheduled to release this
 4153                          * sim queue, remove it.  The queue frozen count is
 4154                          * already at 0.
 4155                          */
 4156                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0) {
 4157                                 callout_stop(&sim->callout);
 4158                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4159                         }
 4160                         bus = xpt_find_bus(sim->path_id);
 4161 
 4162                         if (run_queue) {
 4163                                 /*
 4164                                  * Now that we are unfrozen, run the send queue.
 4165                                  */
 4166                                 xpt_run_dev_sendq(bus);
 4167                         }
 4168                         xpt_release_bus(bus);
 4169                 }
 4170         }
 4171 }
 4172 
 4173 /*
 4174  * XXX Appears to be unused.
 4175  */
 4176 static void
 4177 xpt_release_simq_timeout(void *arg)
 4178 {
 4179         struct cam_sim *sim;
 4180 
 4181         sim = (struct cam_sim *)arg;
 4182         xpt_release_simq(sim, /* run_queue */ TRUE);
 4183 }
 4184 
 4185 void
 4186 xpt_done(union ccb *done_ccb)
 4187 {
 4188         struct cam_sim *sim;
 4189 
 4190         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4191         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4192                 /*
 4193                  * Queue up the request for handling by our SWI handler;
 4194                  * this covers all of the "non-immediate" types of CCBs.
 4195                  */
 4196                 sim = done_ccb->ccb_h.path->bus->sim;
 4197                 switch (done_ccb->ccb_h.path->periph->type) {
 4198                 case CAM_PERIPH_BIO:
 4199                         TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
 4200                                           sim_links.tqe);
 4201                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4202                         if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
 4203                                 mtx_lock(&cam_simq_lock);
 4204                                 TAILQ_INSERT_TAIL(&cam_simq, sim,
 4205                                                   links);
 4206                                 sim->flags |= CAM_SIM_ON_DONEQ;
 4207                                 mtx_unlock(&cam_simq_lock);
 4208                         }
 4209                         if ((done_ccb->ccb_h.path->periph->flags &
 4210                             CAM_PERIPH_POLLED) == 0)
 4211                                 swi_sched(cambio_ih, 0);
 4212                         break;
 4213                 default:
 4214                         panic("unknown periph type %d",
 4215                             done_ccb->ccb_h.path->periph->type);
 4216                 }
 4217         }
 4218 }
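
      /*
       * Illustrative sketch, not from this file: the usual way a SIM's
       * interrupt handler finishes a CCB and hands it to xpt_done() for
       * deferred completion in the CAM SWI.  foo_complete() is a
       * hypothetical helper.
       */
      #if 0
      static void
      foo_complete(union ccb *ccb, int error)
      {
              ccb->ccb_h.status = error ? CAM_REQ_CMP_ERR : CAM_REQ_CMP;
              xpt_done(ccb);          /* Defers the callback to the SWI. */
      }
      #endif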
 4219 
 4220 union ccb *
 4221 xpt_alloc_ccb(void)
 4222 {
 4223         union ccb *new_ccb;
 4224 
 4225         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK);
 4226         return (new_ccb);
 4227 }
 4228 
 4229 union ccb *
 4230 xpt_alloc_ccb_nowait(void)
 4231 {
 4232         union ccb *new_ccb;
 4233 
 4234         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT);
 4235         return (new_ccb);
 4236 }
 4237 
 4238 void
 4239 xpt_free_ccb(union ccb *free_ccb)
 4240 {
 4241         free(free_ccb, M_CAMXPT);
 4242 }
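
      /*
       * Illustrative usage sketch (assumption): allocate a CCB, issue a
       * synchronous path inquiry, and free it again.  "path" is assumed
       * to be a valid path whose SIM lock is already held, which is why
       * the nowait allocator is used here.
       */
      #if 0
      static int
      example_path_inq(struct cam_path *path)
      {
              union ccb *ccb;

              ccb = xpt_alloc_ccb_nowait();   /* Safe under a mutex. */
              if (ccb == NULL)
                      return (ENOMEM);
              xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/1);
              ccb->ccb_h.func_code = XPT_PATH_INQ;
              xpt_action(ccb);        /* XPT_PATH_INQ completes inline. */
              if (ccb->ccb_h.status == CAM_REQ_CMP)
                      printf("initiator id %u\n", ccb->cpi.initiator_id);
              xpt_free_ccb(ccb);
              return (0);
      }
      #endif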
 4243 
 4244 
 4245 
 4246 /* Private XPT functions */
 4247 
 4248 /*
 4249  * Get a CAM control block for the caller. Charge the structure to the device
 4250  * referenced by the path.  If this device has no 'credits' then the
 4251  * device already has the maximum number of outstanding operations under way
 4252  * and we return NULL. If we don't have sufficient resources to allocate more
 4253  * ccbs, we also return NULL.
 4254  */
 4255 static union ccb *
 4256 xpt_get_ccb(struct cam_ed *device)
 4257 {
 4258         union ccb *new_ccb;
 4259         struct cam_sim *sim;
 4260 
 4261         sim = device->sim;
 4262         if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
 4263                 new_ccb = xpt_alloc_ccb_nowait();
 4264                 if (new_ccb == NULL) {
 4265                         return (NULL);
 4266                 }
 4267                 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
 4268                         callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 4269                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
 4270                                   xpt_links.sle);
 4271                 sim->ccb_count++;
 4272         }
 4273         cam_ccbq_take_opening(&device->ccbq);
 4274         SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
 4275         return (new_ccb);
 4276 }
 4277 
 4278 static void
 4279 xpt_release_bus(struct cam_eb *bus)
 4280 {
 4281 
 4282         if ((--bus->refcount == 0)
 4283          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
 4284                 mtx_lock(&xsoftc.xpt_topo_lock);
 4285                 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
 4286                 xsoftc.bus_generation++;
 4287                 mtx_unlock(&xsoftc.xpt_topo_lock);
 4288                 cam_sim_release(bus->sim);
 4289                 free(bus, M_CAMXPT);
 4290         }
 4291 }
 4292 
 4293 static struct cam_et *
 4294 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4295 {
 4296         struct cam_et *target;
 4297 
 4298         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
 4299         if (target != NULL) {
 4300                 struct cam_et *cur_target;
 4301 
 4302                 TAILQ_INIT(&target->ed_entries);
 4303                 target->bus = bus;
 4304                 target->target_id = target_id;
 4305                 target->refcount = 1;
 4306                 target->generation = 0;
 4307                 timevalclear(&target->last_reset);
 4308                 /*
 4309                  * Hold a reference to our parent bus so it
 4310                  * will not go away before we do.
 4311                  */
 4312                 bus->refcount++;
 4313 
 4314                 /* Insertion sort into our bus's target list */
 4315                 cur_target = TAILQ_FIRST(&bus->et_entries);
 4316                 while (cur_target != NULL && cur_target->target_id < target_id)
 4317                         cur_target = TAILQ_NEXT(cur_target, links);
 4318 
 4319                 if (cur_target != NULL) {
 4320                         TAILQ_INSERT_BEFORE(cur_target, target, links);
 4321                 } else {
 4322                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4323                 }
 4324                 bus->generation++;
 4325         }
 4326         return (target);
 4327 }
 4328 
 4329 static void
 4330 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
 4331 {
 4332 
 4333         if ((--target->refcount == 0)
 4334          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
 4335                 TAILQ_REMOVE(&bus->et_entries, target, links);
 4336                 bus->generation++;
 4337                 free(target, M_CAMXPT);
 4338                 xpt_release_bus(bus);
 4339         }
 4340 }
 4341 
 4342 static struct cam_ed *
 4343 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
 4344                          lun_id_t lun_id)
 4345 {
 4346         struct cam_ed *device, *cur_device;
 4347 
 4348         device = xpt_alloc_device(bus, target, lun_id);
 4349         if (device == NULL)
 4350                 return (NULL);
 4351 
 4352         device->mintags = 1;
 4353         device->maxtags = 1;
 4354         bus->sim->max_ccbs = device->ccbq.devq_openings;
 4355         cur_device = TAILQ_FIRST(&target->ed_entries);
 4356         while (cur_device != NULL && cur_device->lun_id < lun_id)
 4357                 cur_device = TAILQ_NEXT(cur_device, links);
 4358         if (cur_device != NULL) {
 4359                 TAILQ_INSERT_BEFORE(cur_device, device, links);
 4360         } else {
 4361                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 4362         }
 4363         target->generation++;
 4364 
 4365         return (device);
 4366 }
 4367 
 4368 struct cam_ed *
 4369 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4370 {
 4371         struct     cam_ed *device;
 4372         struct     cam_devq *devq;
 4373         cam_status status;
 4374 
 4375         /* Make space for us in the device queue on our bus */
 4376         devq = bus->sim->devq;
 4377         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 4378 
 4379         if (status != CAM_REQ_CMP) {
 4380                 device = NULL;
 4381         } else {
 4382                 device = (struct cam_ed *)malloc(sizeof(*device),
 4383                                                  M_CAMXPT, M_NOWAIT);
 4384         }
 4385 
 4386         if (device != NULL) {
 4387                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 4388                 device->alloc_ccb_entry.device = device;
 4389                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
 4390                 device->send_ccb_entry.device = device;
 4391                 device->target = target;
 4392                 device->lun_id = lun_id;
 4393                 device->sim = bus->sim;
 4394                 /* Initialize our queues */
 4395                 if (camq_init(&device->drvq, 0) != 0) {
 4396                         free(device, M_CAMXPT);
 4397                         return (NULL);
 4398                 }
 4399                 if (cam_ccbq_init(&device->ccbq,
 4400                                   bus->sim->max_dev_openings) != 0) {
 4401                         camq_fini(&device->drvq);
 4402                         free(device, M_CAMXPT);
 4403                         return (NULL);
 4404                 }
 4405                 SLIST_INIT(&device->asyncs);
 4406                 SLIST_INIT(&device->periphs);
 4407                 device->generation = 0;
 4408                 device->owner = NULL;
 4409                 device->qfrozen_cnt = 0;
 4410                 device->flags = CAM_DEV_UNCONFIGURED;
 4411                 device->tag_delay_count = 0;
 4412                 device->tag_saved_openings = 0;
 4413                 device->refcount = 1;
 4414                 if (bus->sim->flags & CAM_SIM_MPSAFE)
 4415                         callout_init_mtx(&device->callout, bus->sim->mtx, 0);
 4416                 else
 4417                         callout_init_mtx(&device->callout, &Giant, 0);
 4418 
 4419                 /*
 4420                  * Hold a reference to our parent target so it
 4421                  * will not go away before we do.
 4422                  */
 4423                 target->refcount++;
 4424 
 4425         }
 4426         return (device);
 4427 }
 4428 
 4429 static void
 4430 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
 4431                    struct cam_ed *device)
 4432 {
 4433 
 4434         if ((--device->refcount == 0)
 4435          && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
 4436                 struct cam_devq *devq;
 4437 
 4438                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 4439                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 4440                         panic("Removing device while still queued for ccbs");
 4441 
 4442                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 4443                         callout_stop(&device->callout);
 4444 
 4445                 TAILQ_REMOVE(&target->ed_entries, device, links);
 4446                 target->generation++;
 4447                 bus->sim->max_ccbs -= device->ccbq.devq_openings;
 4448                 /* Release our slot in the devq */
 4449                 devq = bus->sim->devq;
 4450                 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 4451                 camq_fini(&device->drvq);
 4452                 camq_fini(&device->ccbq.queue);
 4453                 free(device, M_CAMXPT);
 4454                 xpt_release_target(bus, target);
 4455         }
 4456 }
 4457 
 4458 u_int32_t
 4459 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 4460 {
 4461         int     diff;
 4462         int     result;
 4463         struct  cam_ed *dev;
 4464 
 4465         dev = path->device;
 4466 
 4467         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 4468         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 4469         if (result == CAM_REQ_CMP && (diff < 0)) {
 4470                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 4471         }
 4472         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4473          || (dev->inq_flags & SID_CmdQue) != 0)
 4474                 dev->tag_saved_openings = newopenings;
 4475         /* Adjust the global limit */
 4476         dev->sim->max_ccbs += diff;
 4477         return (result);
 4478 }
 4479 
 4480 static struct cam_eb *
 4481 xpt_find_bus(path_id_t path_id)
 4482 {
 4483         struct cam_eb *bus;
 4484 
 4485         mtx_lock(&xsoftc.xpt_topo_lock);
 4486         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4487              bus != NULL;
 4488              bus = TAILQ_NEXT(bus, links)) {
 4489                 if (bus->path_id == path_id) {
 4490                         bus->refcount++;
 4491                         break;
 4492                 }
 4493         }
 4494         mtx_unlock(&xsoftc.xpt_topo_lock);
 4495         return (bus);
 4496 }
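
      /*
       * Illustrative sketch (assumption): the reference discipline the
       * find routines here and below imply.  Each successful lookup
       * gains a reference the caller must eventually drop, just as
       * xpt_release_simq() pairs xpt_find_bus() with xpt_release_bus().
       */
      #if 0
              struct cam_eb *bus;

              bus = xpt_find_bus(pathid);     /* Takes a bus reference. */
              if (bus != NULL) {
                      /* ... examine the bus ... */
                      xpt_release_bus(bus);   /* Drop it when finished. */
              }
      #endif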
 4497 
 4498 static struct cam_et *
 4499 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 4500 {
 4501         struct cam_et *target;
 4502 
 4503         for (target = TAILQ_FIRST(&bus->et_entries);
 4504              target != NULL;
 4505              target = TAILQ_NEXT(target, links)) {
 4506                 if (target->target_id == target_id) {
 4507                         target->refcount++;
 4508                         break;
 4509                 }
 4510         }
 4511         return (target);
 4512 }
 4513 
 4514 static struct cam_ed *
 4515 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 4516 {
 4517         struct cam_ed *device;
 4518 
 4519         for (device = TAILQ_FIRST(&target->ed_entries);
 4520              device != NULL;
 4521              device = TAILQ_NEXT(device, links)) {
 4522                 if (device->lun_id == lun_id) {
 4523                         device->refcount++;
 4524                         break;
 4525                 }
 4526         }
 4527         return (device);
 4528 }
 4529 
 4530 static void
 4531 xpt_start_tags(struct cam_path *path)
 4532 {
 4533         struct ccb_relsim crs;
 4534         struct cam_ed *device;
 4535         struct cam_sim *sim;
 4536         int    newopenings;
 4537 
 4538         device = path->device;
 4539         sim = path->bus->sim;
 4540         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4541         xpt_freeze_devq(path, /*count*/1);
 4542         device->inq_flags |= SID_CmdQue;
 4543         if (device->tag_saved_openings != 0)
 4544                 newopenings = device->tag_saved_openings;
 4545         else
 4546                 newopenings = min(device->maxtags,
 4547                                   sim->max_tagged_dev_openings);
 4548         xpt_dev_ccbq_resize(path, newopenings);
 4549         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
 4550         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4551         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4552         crs.openings
 4553             = crs.release_timeout
 4554             = crs.qfrozen_cnt
 4555             = 0;
 4556         xpt_action((union ccb *)&crs);
 4557 }
 4558 
 4559 static int busses_to_config;
 4560 static int busses_to_reset;
 4561 
 4562 static int
 4563 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
 4564 {
 4565 
 4566         mtx_assert(bus->sim->mtx, MA_OWNED);
 4567 
 4568         if (bus->path_id != CAM_XPT_PATH_ID) {
 4569                 struct cam_path path;
 4570                 struct ccb_pathinq cpi;
 4571                 int can_negotiate;
 4572 
 4573                 busses_to_config++;
 4574                 xpt_compile_path(&path, NULL, bus->path_id,
 4575                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4576                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 4577                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 4578                 xpt_action((union ccb *)&cpi);
 4579                 can_negotiate = cpi.hba_inquiry;
 4580                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
 4581                 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
 4582                  && can_negotiate)
 4583                         busses_to_reset++;
 4584                 xpt_release_path(&path);
 4585         }
 4586 
 4587         return(1);
 4588 }
 4589 
 4590 static int
 4591 xptconfigfunc(struct cam_eb *bus, void *arg)
 4592 {
 4593         struct  cam_path *path;
 4594         union   ccb *work_ccb;
 4595 
 4596         mtx_assert(bus->sim->mtx, MA_OWNED);
 4597 
 4598         if (bus->path_id != CAM_XPT_PATH_ID) {
 4599                 cam_status status;
 4600                 int can_negotiate;
 4601 
 4602                 work_ccb = xpt_alloc_ccb_nowait();
 4603                 if (work_ccb == NULL) {
 4604                         busses_to_config--;
 4605                         xpt_finishconfig(xpt_periph, NULL);
 4606                         return(0);
 4607                 }
 4608                 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
 4609                                               CAM_TARGET_WILDCARD,
 4610                                               CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
 4611                         printf("xptconfigfunc: xpt_create_path failed with "
 4612                                "status %#x for bus %d\n", status, bus->path_id);
 4613                         printf("xptconfigfunc: halting bus configuration\n");
 4614                         xpt_free_ccb(work_ccb);
 4615                         busses_to_config--;
 4616                         xpt_finishconfig(xpt_periph, NULL);
 4617                         return(0);
 4618                 }
 4619                 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
 4620                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 4621                 xpt_action(work_ccb);
 4622                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
 4623                         printf("xptconfigfunc: CPI failed on bus %d "
 4624                                "with status %d\n", bus->path_id,
 4625                                work_ccb->ccb_h.status);
 4626                         xpt_finishconfig(xpt_periph, work_ccb);
 4627                         return(1);
 4628                 }
 4629 
 4630                 can_negotiate = work_ccb->cpi.hba_inquiry;
 4631                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
 4632                 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
 4633                  && (can_negotiate != 0)) {
 4634                         xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
 4635                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
 4636                         work_ccb->ccb_h.cbfcnp = NULL;
 4637                         CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
 4638                                   ("Resetting Bus\n"));
 4639                         xpt_action(work_ccb);
 4640                         xpt_finishconfig(xpt_periph, work_ccb);
 4641                 } else {
 4642                         /* Act as though we performed a successful BUS RESET */
 4643                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
 4644                         xpt_finishconfig(xpt_periph, work_ccb);
 4645                 }
 4646         }
 4647 
 4648         return(1);
 4649 }
 4650 
 4651 static void
 4652 xpt_config(void *arg)
 4653 {
 4654         /*
 4655          * Now that interrupts are enabled, go find our devices
 4656          */
 4657 
 4658 #ifdef CAMDEBUG
 4659         /* Setup debugging flags and path */
 4660 #ifdef CAM_DEBUG_FLAGS
 4661         cam_dflags = CAM_DEBUG_FLAGS;
 4662 #else /* !CAM_DEBUG_FLAGS */
 4663         cam_dflags = CAM_DEBUG_NONE;
 4664 #endif /* CAM_DEBUG_FLAGS */
 4665 #ifdef CAM_DEBUG_BUS
 4666         if (cam_dflags != CAM_DEBUG_NONE) {
 4667                 /*
 4668                  * Locking is specifically omitted here.  No SIMs have
 4669                  * registered yet, so xpt_create_path will only be searching
 4670                  * empty lists of targets and devices.
 4671                  */
 4672                 if (xpt_create_path(&cam_dpath, xpt_periph,
 4673                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 4674                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 4675                         printf("xpt_config: xpt_create_path() failed for debug"
 4676                                " target %d:%d:%d, debugging disabled\n",
 4677                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 4678                         cam_dflags = CAM_DEBUG_NONE;
 4679                 }
 4680         } else
 4681                 cam_dpath = NULL;
 4682 #else /* !CAM_DEBUG_BUS */
 4683         cam_dpath = NULL;
 4684 #endif /* CAM_DEBUG_BUS */
 4685 #endif /* CAMDEBUG */
 4686 
 4687         /*
 4688          * Scan all installed busses.
 4689          */
 4690         xpt_for_all_busses(xptconfigbuscountfunc, NULL);
 4691 
 4692         if (busses_to_config == 0) {
 4693                 /* Call manually because we don't have any busses */
 4694                 xpt_finishconfig(xpt_periph, NULL);
 4695         } else  {
 4696                 if (busses_to_reset > 0 && scsi_delay >= 2000) {
 4697                         printf("Waiting %d seconds for SCSI "
 4698                                "devices to settle\n", scsi_delay/1000);
 4699                 }
 4700                 xpt_for_all_busses(xptconfigfunc, NULL);
 4701         }
 4702 }
 4703 
 4704 /*
 4705  * If the given device only has one peripheral attached to it, and if that
 4706  * peripheral is the passthrough driver, announce it.  This ensures that the
 4707  * user sees some sort of announcement for every peripheral in their system.
 4708  */
 4709 static int
 4710 xptpassannouncefunc(struct cam_ed *device, void *arg)
 4711 {
 4712         struct cam_periph *periph;
 4713         int i;
 4714 
 4715         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 4716              periph = SLIST_NEXT(periph, periph_links), i++);
 4717 
 4718         periph = SLIST_FIRST(&device->periphs);
 4719         if ((i == 1)
 4720          && (strncmp(periph->periph_name, "pass", 4) == 0))
 4721                 xpt_announce_periph(periph, NULL);
 4722 
 4723         return(1);
 4724 }
 4725 
 4726 static void
 4727 xpt_finishconfig_task(void *context, int pending)
 4728 {
 4729         struct  periph_driver **p_drv;
 4730         int     i;
 4731 
 4732         if (busses_to_config == 0) {
 4733                 /* Register all the peripheral drivers */
 4734                 /* XXX This will have to change when we have loadable modules */
 4735                 p_drv = periph_drivers;
 4736                 for (i = 0; p_drv[i] != NULL; i++) {
 4737                         (*p_drv[i]->init)();
 4738                 }
 4739 
 4740                 /*
 4741                  * Check for devices with no "standard" peripheral driver
 4742                  * attached.  For any devices like that, announce the
 4743                  * passthrough driver so the user will see something.
 4744                  */
 4745                 xpt_for_all_devices(xptpassannouncefunc, NULL);
 4746 
 4747                 /* Release our hook so that the boot can continue. */
 4748                 config_intrhook_disestablish(xsoftc.xpt_config_hook);
 4749                 free(xsoftc.xpt_config_hook, M_CAMXPT);
 4750                 xsoftc.xpt_config_hook = NULL;
 4751         }
 4752 
 4753         free(context, M_CAMXPT);
 4754 }
 4755 
 4756 static void
 4757 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
 4758 {
 4759         struct  xpt_task *task;
 4760 
 4761         if (done_ccb != NULL) {
 4762                 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 4763                           ("xpt_finishconfig\n"));
 4764                 switch(done_ccb->ccb_h.func_code) {
 4765                 case XPT_RESET_BUS:
 4766                         if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
 4767                                 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
 4768                                 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
 4769                                 done_ccb->crcn.flags = 0;
 4770                                 xpt_action(done_ccb);
 4771                                 return;
 4772                         }
 4773                         /* FALLTHROUGH */
 4774                 case XPT_SCAN_BUS:
 4775                 default:
 4776                         xpt_free_path(done_ccb->ccb_h.path);
 4777                         busses_to_config--;
 4778                         break;
 4779                 }
 4780         }
 4781 
 4782         if (busses_to_config == 0) {
 4783                 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
 4784                 if (task != NULL) {
 4785                         TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
 4786                         taskqueue_enqueue(taskqueue_thread, &task->task);
 4787                 }
 4788         }
 4789 
 4790         if (done_ccb != NULL)
 4791                 xpt_free_ccb(done_ccb);
 4792 }
 4793 
 4794 cam_status
 4795 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
 4796                    struct cam_path *path)
 4797 {
 4798         struct ccb_setasync csa;
 4799         cam_status status;
 4800         int xptpath = 0;
 4801 
 4802         if (path == NULL) {
 4803                 mtx_lock(&xsoftc.xpt_lock);
 4804                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
 4805                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4806                 if (status != CAM_REQ_CMP) {
 4807                         mtx_unlock(&xsoftc.xpt_lock);
 4808                         return (status);
 4809                 }
 4810                 xptpath = 1;
 4811         }
 4812 
 4813         xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
 4814         csa.ccb_h.func_code = XPT_SASYNC_CB;
 4815         csa.event_enable = event;
 4816         csa.callback = cbfunc;
 4817         csa.callback_arg = cbarg;
 4818         xpt_action((union ccb *)&csa);
 4819         status = csa.ccb_h.status;
 4820         if (xptpath) {
 4821                 xpt_free_path(path);
 4822                 mtx_unlock(&xsoftc.xpt_lock);
 4823         }
 4824         return (status);
 4825 }
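
      /*
       * Illustrative sketch, not from this file: a peripheral driver
       * subscribing to bus-reset notifications.  example_async() is a
       * hypothetical ac_callback_t.
       */
      #if 0
      static void
      example_async(void *cbarg, u_int32_t code, struct cam_path *path,
                    void *async_arg)
      {
              if (code == AC_BUS_RESET)
                      printf("example: bus reset seen\n");
      }

      static cam_status
      example_subscribe(struct cam_path *path)
      {
              /* A NULL path would select the XPT's own wildcard path. */
              return (xpt_register_async(AC_BUS_RESET, example_async,
                                         /*cbarg*/NULL, path));
      }
      #endif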
 4826 
 4827 static void
 4828 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 4829 {
 4830         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 4831 
 4832         switch (work_ccb->ccb_h.func_code) {
 4833         /* Common cases first */
 4834         case XPT_PATH_INQ:              /* Path routing inquiry */
 4835         {
 4836                 struct ccb_pathinq *cpi;
 4837 
 4838                 cpi = &work_ccb->cpi;
 4839                 cpi->version_num = 1; /* XXX??? */
 4840                 cpi->hba_inquiry = 0;
 4841                 cpi->target_sprt = 0;
 4842                 cpi->hba_misc = 0;
 4843                 cpi->hba_eng_cnt = 0;
 4844                 cpi->max_target = 0;
 4845                 cpi->max_lun = 0;
 4846                 cpi->initiator_id = 0;
 4847                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 4848                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
 4849                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 4850                 cpi->unit_number = sim->unit_number;
 4851                 cpi->bus_id = sim->bus_id;
 4852                 cpi->base_transfer_speed = 0;
 4853                 cpi->protocol = PROTO_UNSPECIFIED;
 4854                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
 4855                 cpi->transport = XPORT_UNSPECIFIED;
 4856                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
 4857                 cpi->ccb_h.status = CAM_REQ_CMP;
 4858                 xpt_done(work_ccb);
 4859                 break;
 4860         }
 4861         default:
 4862                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 4863                 xpt_done(work_ccb);
 4864                 break;
 4865         }
 4866 }
 4867 
 4868 /*
 4869  * The xpt as a "controller" has no interrupt sources, so polling
 4870  * is a no-op.
 4871  */
 4872 static void
 4873 xptpoll(struct cam_sim *sim)
 4874 {
 4875 }
 4876 
 4877 void
 4878 xpt_lock_buses(void)
 4879 {
 4880         mtx_lock(&xsoftc.xpt_topo_lock);
 4881 }
 4882 
 4883 void
 4884 xpt_unlock_buses(void)
 4885 {
 4886         mtx_unlock(&xsoftc.xpt_topo_lock);
 4887 }
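
      /*
       * Illustrative sketch (assumption): callers bracket any walk of
       * the bus list with these helpers so the topology cannot change
       * underneath them.
       */
      #if 0
              xpt_lock_buses();
              /* ... traverse or count busses safely ... */
              xpt_unlock_buses();
      #endif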
 4888 
 4889 static void
 4890 camisr(void *dummy)
 4891 {
 4892         cam_simq_t queue;
 4893         struct cam_sim *sim;
 4894 
 4895         mtx_lock(&cam_simq_lock);
 4896         TAILQ_INIT(&queue);
 4897         TAILQ_CONCAT(&queue, &cam_simq, links);
 4898         mtx_unlock(&cam_simq_lock);
 4899 
 4900         while ((sim = TAILQ_FIRST(&queue)) != NULL) {
 4901                 TAILQ_REMOVE(&queue, sim, links);
 4902                 CAM_SIM_LOCK(sim);
 4903                 sim->flags &= ~CAM_SIM_ON_DONEQ;
 4904                 camisr_runqueue(&sim->sim_doneq);
 4905                 CAM_SIM_UNLOCK(sim);
 4906         }
 4907 }
 4908 
 4909 static void
 4910 camisr_runqueue(void *V_queue)
 4911 {
 4912         cam_isrq_t *queue = V_queue;
 4913         struct  ccb_hdr *ccb_h;
 4914 
 4915         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
 4916                 int     runq;
 4917 
 4918                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
 4919                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 4920 
 4921                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
 4922                           ("camisr\n"));
 4923 
 4924                 runq = FALSE;
 4925 
 4926                 if (ccb_h->flags & CAM_HIGH_POWER) {
 4927                         struct highpowerlist    *hphead;
 4928                         union ccb               *send_ccb;
 4929 
 4930                         mtx_lock(&xsoftc.xpt_lock);
 4931                         hphead = &xsoftc.highpowerq;
 4932 
 4933                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
 4934 
 4935                         /*
 4936                          * Increment the count since this command is done.
 4937                          */
 4938                         xsoftc.num_highpower++;
 4939 
 4940                         /*
 4941                          * Any high powered commands queued up?
 4942                          */
 4943                         if (send_ccb != NULL) {
 4944 
 4945                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
 4946                                 mtx_unlock(&xsoftc.xpt_lock);
 4947 
 4948                                 xpt_release_devq(send_ccb->ccb_h.path,
 4949                                                  /*count*/1, /*runqueue*/TRUE);
 4950                         } else
 4951                                 mtx_unlock(&xsoftc.xpt_lock);
 4952                 }
 4953 
 4954                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 4955                         struct cam_ed *dev;
 4956 
 4957                         dev = ccb_h->path->device;
 4958 
 4959                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 4960                         ccb_h->path->bus->sim->devq->send_active--;
 4961                         ccb_h->path->bus->sim->devq->send_openings++;
 4962 
 4963                         if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 4964                           && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
 4965                          || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 4966                           && (dev->ccbq.dev_active == 0))) {
 4967 
 4968                                 xpt_release_devq(ccb_h->path, /*count*/1,
 4969                                                  /*run_queue*/TRUE);
 4970                         }
 4971 
 4972                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4973                          && (--dev->tag_delay_count == 0))
 4974                                 xpt_start_tags(ccb_h->path);
 4975 
 4976                         if ((dev->ccbq.queue.entries > 0)
 4977                          && (dev->qfrozen_cnt == 0)
 4978                          && (device_is_send_queued(dev) == 0)) {
 4979                                 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
 4980                                                               dev);
 4981                         }
 4982                 }
 4983 
 4984                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
 4985                         xpt_release_simq(ccb_h->path->bus->sim,
 4986                                          /*run_queue*/TRUE);
 4987                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
 4988                         runq = FALSE;
 4989                 }
 4990 
 4991                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 4992                  && (ccb_h->status & CAM_DEV_QFRZN)) {
 4993                         xpt_release_devq(ccb_h->path, /*count*/1,
 4994                                          /*run_queue*/TRUE);
 4995                         ccb_h->status &= ~CAM_DEV_QFRZN;
 4996                 } else if (runq) {
 4997                         xpt_run_dev_sendq(ccb_h->path->bus);
 4998                 }
 4999 
 5000                 /* Call the peripheral driver's callback */
 5001                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 5002         }
 5003 }
 5004 
