FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c


    1 /*-
    2  * Implementation of the Common Access Method Transport (XPT) layer.
    3  *
    4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
    5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/8.4/sys/cam/cam_xpt.c 246432 2013-02-06 18:41:12Z mav $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/systm.h>
   36 #include <sys/types.h>
   37 #include <sys/malloc.h>
   38 #include <sys/kernel.h>
   39 #include <sys/time.h>
   40 #include <sys/conf.h>
   41 #include <sys/fcntl.h>
   42 #include <sys/reboot.h>
   43 #include <sys/interrupt.h>
   44 #include <sys/sbuf.h>
   45 #include <sys/taskqueue.h>
   46 
   47 #include <sys/lock.h>
   48 #include <sys/mutex.h>
   49 #include <sys/sysctl.h>
   50 #include <sys/kthread.h>
   51 
   52 #include <cam/cam.h>
   53 #include <cam/cam_ccb.h>
   54 #include <cam/cam_periph.h>
   55 #include <cam/cam_queue.h>
   56 #include <cam/cam_sim.h>
   57 #include <cam/cam_xpt.h>
   58 #include <cam/cam_xpt_sim.h>
   59 #include <cam/cam_xpt_periph.h>
   60 #include <cam/cam_xpt_internal.h>
   61 #include <cam/cam_debug.h>
   62 
   63 #include <cam/scsi/scsi_all.h>
   64 #include <cam/scsi/scsi_message.h>
   65 #include <cam/scsi/scsi_pass.h>
   66 
   67 #include <machine/md_var.h>     /* geometry translation */
   68 #include <machine/stdarg.h>     /* for xpt_print below */
   69 
   70 #include "opt_cam.h"
   71 
   72 /*
   73  * This is the maximum number of high powered commands (e.g. start unit)
   74  * that can be outstanding at a particular time.
   75  */
   76 #ifndef CAM_MAX_HIGHPOWER
   77 #define CAM_MAX_HIGHPOWER  4
   78 #endif
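/*
 * Editorial sketch (not in the original file): how a peripheral driver
 * typically marks a command as high powered so that the XPT throttles
 * it against CAM_MAX_HIGHPOWER.  scsi_start_stop(), MSG_SIMPLE_Q_TAG
 * and CAM_HIGH_POWER are the real interfaces; "dadone" stands in for
 * the caller's completion routine, and the CCB is assumed to have been
 * allocated and set up already.
 */
#if 0   /* illustration only */
        scsi_start_stop(&ccb->csio,
                        /*retries*/4,
                        /*cbfcnp*/dadone,
                        /*tag_action*/MSG_SIMPLE_Q_TAG,
                        /*start*/1,
                        /*load_eject*/0,
                        /*immediate*/0,
                        SSD_FULL_SIZE,
                        /*timeout*/60 * 1000);
        ccb->ccb_h.flags |= CAM_HIGH_POWER;     /* parked on highpowerq when over limit */
        xpt_action(ccb);
#endif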
   79 
    80 /* Data structures internal to the xpt layer */
   81 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
   82 
    83 /* Object for deferring XPT actions to a taskqueue */
   84 struct xpt_task {
   85         struct task     task;
   86         void            *data1;
   87         uintptr_t       data2;
   88 };
   89 
   90 typedef enum {
   91         XPT_FLAG_OPEN           = 0x01
   92 } xpt_flags;
   93 
   94 struct xpt_softc {
   95         xpt_flags               flags;
   96         u_int32_t               xpt_generation;
   97 
   98         /* number of high powered commands that can go through right now */
   99         STAILQ_HEAD(highpowerlist, ccb_hdr)     highpowerq;
  100         int                     num_highpower;
  101 
  102         /* queue for handling async rescan requests. */
  103         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
  104         int buses_to_config;
  105         int buses_config_done;
  106 
  107         /* Registered busses */
  108         TAILQ_HEAD(,cam_eb)     xpt_busses;
  109         u_int                   bus_generation;
  110 
  111         struct intr_config_hook *xpt_config_hook;
  112 
  113         int                     boot_delay;
  114         struct callout          boot_callout;
  115 
  116         struct mtx              xpt_topo_lock;
  117         struct mtx              xpt_lock;
  118 };
  119 
  120 typedef enum {
  121         DM_RET_COPY             = 0x01,
  122         DM_RET_FLAG_MASK        = 0x0f,
  123         DM_RET_NONE             = 0x00,
  124         DM_RET_STOP             = 0x10,
  125         DM_RET_DESCEND          = 0x20,
  126         DM_RET_ERROR            = 0x30,
  127         DM_RET_ACTION_MASK      = 0xf0
  128 } dev_match_ret;
  129 
  130 typedef enum {
  131         XPT_DEPTH_BUS,
  132         XPT_DEPTH_TARGET,
  133         XPT_DEPTH_DEVICE,
  134         XPT_DEPTH_PERIPH
  135 } xpt_traverse_depth;
  136 
  137 struct xpt_traverse_config {
  138         xpt_traverse_depth      depth;
  139         void                    *tr_func;
  140         void                    *tr_arg;
  141 };
  142 
  143 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
  144 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
  145 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
  146 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
  147 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
  148 
  149 /* Transport layer configuration information */
  150 static struct xpt_softc xsoftc;
  151 
  152 TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
  153 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
  154            &xsoftc.boot_delay, 0, "Bus registration wait time");
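/*
 * Editorial note (not in the original file): boot_delay is a loader
 * tunable as well as a read-only sysctl, so it is normally set from
 * /boot/loader.conf before boot, e.g.:
 *
 *      kern.cam.boot_delay="10000"
 *
 * The value is interpreted in milliseconds by the boot_callout logic.
 */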
  155 
  156 /* Queues for our software interrupt handler */
  157 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
  158 typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
  159 static cam_simq_t cam_simq;
  160 static struct mtx cam_simq_lock;
  161 
  162 /* Pointers to software interrupt handlers */
  163 static void *cambio_ih;
  164 
  165 struct cam_periph *xpt_periph;
  166 
  167 static periph_init_t xpt_periph_init;
  168 
  169 static struct periph_driver xpt_driver =
  170 {
  171         xpt_periph_init, "xpt",
  172         TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
  173         CAM_PERIPH_DRV_EARLY
  174 };
  175 
  176 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
  177 
  178 static d_open_t xptopen;
  179 static d_close_t xptclose;
  180 static d_ioctl_t xptioctl;
  181 
  182 static struct cdevsw xpt_cdevsw = {
  183         .d_version =    D_VERSION,
  184         .d_flags =      0,
  185         .d_open =       xptopen,
  186         .d_close =      xptclose,
  187         .d_ioctl =      xptioctl,
  188         .d_name =       "xpt",
  189 };
  190 
   191 /* Storage for debugging data structures */
  192 struct cam_path *cam_dpath;
  193 u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
  194 TUNABLE_INT("kern.cam.dflags", &cam_dflags);
  195 SYSCTL_INT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
  196         &cam_dflags, 0, "Enabled debug flags");
  197 u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
  198 TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
  199 SYSCTL_INT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
  200         &cam_debug_delay, 0, "Delay in us after each debug message");
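/*
 * Editorial note (not in the original file): unlike boot_delay, both
 * debug knobs above are CTLFLAG_RW and can be changed at runtime, e.g.
 * (assuming CAM_DEBUG_INFO is the 0x1 bit defined in cam_debug.h):
 *
 *      # sysctl kern.cam.dflags=0x1
 *      # sysctl kern.cam.debug_delay=1000
 *
 * camcontrol(8)'s "debug" subcommand is the friendlier front end; it
 * also points cam_dpath at a specific bus:target:lun via XPT_DEBUG.
 */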
  201 
  202 /* Our boot-time initialization hook */
  203 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
  204 
  205 static moduledata_t cam_moduledata = {
  206         "cam",
  207         cam_module_event_handler,
  208         NULL
  209 };
  210 
  211 static int      xpt_init(void *);
  212 
  213 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
  214 MODULE_VERSION(cam, 1);
  215 
  216 
  217 static void             xpt_async_bcast(struct async_list *async_head,
  218                                         u_int32_t async_code,
  219                                         struct cam_path *path,
  220                                         void *async_arg);
  221 static path_id_t xptnextfreepathid(void);
  222 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
  223 static union ccb *xpt_get_ccb(struct cam_ed *device);
  224 static void      xpt_run_dev_allocq(struct cam_eb *bus);
  225 static void      xpt_run_dev_sendq(struct cam_eb *bus);
  226 static timeout_t xpt_release_devq_timeout;
  227 static void      xpt_release_simq_timeout(void *arg) __unused;
  228 static void      xpt_release_bus(struct cam_eb *bus);
  229 static void      xpt_release_devq_device(struct cam_ed *dev, cam_rl rl,
  230                     u_int count, int run_queue);
  231 static struct cam_et*
  232                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
  233 static void      xpt_release_target(struct cam_et *target);
  234 static struct cam_eb*
  235                  xpt_find_bus(path_id_t path_id);
  236 static struct cam_et*
  237                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
  238 static struct cam_ed*
  239                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
  240 static void      xpt_config(void *arg);
  241 static xpt_devicefunc_t xptpassannouncefunc;
  242 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
  243 static void      xptpoll(struct cam_sim *sim);
  244 static void      camisr(void *);
  245 static void      camisr_runqueue(void *);
  246 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
  247                                     u_int num_patterns, struct cam_eb *bus);
  248 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
  249                                        u_int num_patterns,
  250                                        struct cam_ed *device);
  251 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
  252                                        u_int num_patterns,
  253                                        struct cam_periph *periph);
  254 static xpt_busfunc_t    xptedtbusfunc;
  255 static xpt_targetfunc_t xptedttargetfunc;
  256 static xpt_devicefunc_t xptedtdevicefunc;
  257 static xpt_periphfunc_t xptedtperiphfunc;
  258 static xpt_pdrvfunc_t   xptplistpdrvfunc;
  259 static xpt_periphfunc_t xptplistperiphfunc;
  260 static int              xptedtmatch(struct ccb_dev_match *cdm);
  261 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
  262 static int              xptbustraverse(struct cam_eb *start_bus,
  263                                        xpt_busfunc_t *tr_func, void *arg);
  264 static int              xpttargettraverse(struct cam_eb *bus,
  265                                           struct cam_et *start_target,
  266                                           xpt_targetfunc_t *tr_func, void *arg);
  267 static int              xptdevicetraverse(struct cam_et *target,
  268                                           struct cam_ed *start_device,
  269                                           xpt_devicefunc_t *tr_func, void *arg);
  270 static int              xptperiphtraverse(struct cam_ed *device,
  271                                           struct cam_periph *start_periph,
  272                                           xpt_periphfunc_t *tr_func, void *arg);
  273 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
  274                                         xpt_pdrvfunc_t *tr_func, void *arg);
  275 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
  276                                             struct cam_periph *start_periph,
  277                                             xpt_periphfunc_t *tr_func,
  278                                             void *arg);
  279 static xpt_busfunc_t    xptdefbusfunc;
  280 static xpt_targetfunc_t xptdeftargetfunc;
  281 static xpt_devicefunc_t xptdefdevicefunc;
  282 static xpt_periphfunc_t xptdefperiphfunc;
  283 static void             xpt_finishconfig_task(void *context, int pending);
  284 static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
  285 static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
  286                                             void *arg);
  287 static void             xpt_dev_async_default(u_int32_t async_code,
  288                                               struct cam_eb *bus,
  289                                               struct cam_et *target,
  290                                               struct cam_ed *device,
  291                                               void *async_arg);
  292 static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
  293                                                  struct cam_et *target,
  294                                                  lun_id_t lun_id);
  295 static xpt_devicefunc_t xptsetasyncfunc;
  296 static xpt_busfunc_t    xptsetasyncbusfunc;
  297 static cam_status       xptregister(struct cam_periph *periph,
  298                                     void *arg);
  299 static __inline int periph_is_queued(struct cam_periph *periph);
  300 static __inline int device_is_alloc_queued(struct cam_ed *device);
  301 static __inline int device_is_send_queued(struct cam_ed *device);
  302 
  303 static __inline int
  304 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
  305 {
  306         int retval;
  307 
  308         if ((dev->drvq.entries > 0) &&
  309             (dev->ccbq.devq_openings > 0) &&
  310             (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
  311                 CAMQ_GET_PRIO(&dev->drvq))) == 0)) {
  312                 /*
  313                  * The priority of a device waiting for CCB resources
  314                  * is that of the highest priority peripheral driver
  315                  * enqueued.
  316                  */
  317                 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
  318                                           &dev->alloc_ccb_entry.pinfo,
  319                                           CAMQ_GET_PRIO(&dev->drvq));
  320         } else {
  321                 retval = 0;
  322         }
  323 
  324         return (retval);
  325 }
  326 
  327 static __inline int
  328 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
  329 {
  330         int     retval;
  331 
  332         if ((dev->ccbq.queue.entries > 0) &&
  333             (dev->ccbq.dev_openings > 0) &&
  334             (cam_ccbq_frozen_top(&dev->ccbq) == 0)) {
  335                 /*
  336                  * The priority of a device waiting for controller
  337                  * resources is that of the highest priority CCB
  338                  * enqueued.
  339                  */
  340                 retval =
  341                     xpt_schedule_dev(&bus->sim->devq->send_queue,
  342                                      &dev->send_ccb_entry.pinfo,
  343                                      CAMQ_GET_PRIO(&dev->ccbq.queue));
  344         } else {
  345                 retval = 0;
  346         }
  347         return (retval);
  348 }
  349 
  350 static __inline int
  351 periph_is_queued(struct cam_periph *periph)
  352 {
  353         return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
  354 }
  355 
  356 static __inline int
  357 device_is_alloc_queued(struct cam_ed *device)
  358 {
  359         return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  360 }
  361 
  362 static __inline int
  363 device_is_send_queued(struct cam_ed *device)
  364 {
  365         return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  366 }
  367 
  368 static void
  369 xpt_periph_init()
  370 {
  371         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
  372 }
  373 
  374 static void
  375 xptdone(struct cam_periph *periph, union ccb *done_ccb)
  376 {
  377         /* Caller will release the CCB */
  378         wakeup(&done_ccb->ccb_h.cbfcnp);
  379 }
  380 
  381 static int
  382 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
  383 {
  384 
  385         /*
  386          * Only allow read-write access.
  387          */
  388         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
  389                 return(EPERM);
  390 
  391         /*
  392          * We don't allow nonblocking access.
  393          */
  394         if ((flags & O_NONBLOCK) != 0) {
  395                 printf("%s: can't do nonblocking access\n", devtoname(dev));
  396                 return(ENODEV);
  397         }
  398 
  399         /* Mark ourselves open */
  400         mtx_lock(&xsoftc.xpt_lock);
  401         xsoftc.flags |= XPT_FLAG_OPEN;
  402         mtx_unlock(&xsoftc.xpt_lock);
  403 
  404         return(0);
  405 }
  406 
  407 static int
  408 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
  409 {
  410 
  411         /* Mark ourselves closed */
  412         mtx_lock(&xsoftc.xpt_lock);
  413         xsoftc.flags &= ~XPT_FLAG_OPEN;
  414         mtx_unlock(&xsoftc.xpt_lock);
  415 
  416         return(0);
  417 }
  418 
  419 /*
  420  * Don't automatically grab the xpt softc lock here even though this is going
  421  * through the xpt device.  The xpt device is really just a back door for
  422  * accessing other devices and SIMs, so the right thing to do is to grab
  423  * the appropriate SIM lock once the bus/SIM is located.
  424  */
  425 static int
  426 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
  427 {
  428         int error;
  429 
  430         error = 0;
  431 
  432         switch(cmd) {
  433         /*
  434          * For the transport layer CAMIOCOMMAND ioctl, we really only want
  435          * to accept CCB types that don't quite make sense to send through a
  436          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
  437          * in the CAM spec.
  438          */
  439         case CAMIOCOMMAND: {
  440                 union ccb *ccb;
  441                 union ccb *inccb;
  442                 struct cam_eb *bus;
  443 
  444                 inccb = (union ccb *)addr;
  445 
  446                 bus = xpt_find_bus(inccb->ccb_h.path_id);
  447                 if (bus == NULL)
  448                         return (EINVAL);
  449 
  450                 switch (inccb->ccb_h.func_code) {
  451                 case XPT_SCAN_BUS:
  452                 case XPT_RESET_BUS:
  453                         if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
  454                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
  455                                 xpt_release_bus(bus);
  456                                 return (EINVAL);
  457                         }
  458                         break;
  459                 case XPT_SCAN_TGT:
  460                         if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
  461                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
  462                                 xpt_release_bus(bus);
  463                                 return (EINVAL);
  464                         }
  465                         break;
  466                 default:
  467                         break;
  468                 }
  469 
  470                 switch(inccb->ccb_h.func_code) {
  471                 case XPT_SCAN_BUS:
  472                 case XPT_RESET_BUS:
  473                 case XPT_PATH_INQ:
  474                 case XPT_ENG_INQ:
  475                 case XPT_SCAN_LUN:
  476                 case XPT_SCAN_TGT:
  477 
  478                         ccb = xpt_alloc_ccb();
  479 
  480                         CAM_SIM_LOCK(bus->sim);
  481 
  482                         /*
  483                          * Create a path using the bus, target, and lun the
  484                          * user passed in.
  485                          */
  486                         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
  487                                             inccb->ccb_h.path_id,
  488                                             inccb->ccb_h.target_id,
  489                                             inccb->ccb_h.target_lun) !=
  490                                             CAM_REQ_CMP){
  491                                 error = EINVAL;
  492                                 CAM_SIM_UNLOCK(bus->sim);
  493                                 xpt_free_ccb(ccb);
  494                                 break;
  495                         }
  496                         /* Ensure all of our fields are correct */
  497                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
  498                                       inccb->ccb_h.pinfo.priority);
  499                         xpt_merge_ccb(ccb, inccb);
  500                         ccb->ccb_h.cbfcnp = xptdone;
  501                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
  502                         bcopy(ccb, inccb, sizeof(union ccb));
  503                         xpt_free_path(ccb->ccb_h.path);
  504                         xpt_free_ccb(ccb);
  505                         CAM_SIM_UNLOCK(bus->sim);
  506                         break;
  507 
  508                 case XPT_DEBUG: {
  509                         union ccb ccb;
  510 
  511                         /*
  512                          * This is an immediate CCB, so it's okay to
  513                          * allocate it on the stack.
  514                          */
  515 
  516                         CAM_SIM_LOCK(bus->sim);
  517 
  518                         /*
  519                          * Create a path using the bus, target, and lun the
  520                          * user passed in.
  521                          */
  522                         if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
  523                                             inccb->ccb_h.path_id,
  524                                             inccb->ccb_h.target_id,
  525                                             inccb->ccb_h.target_lun) !=
  526                                             CAM_REQ_CMP){
  527                                 error = EINVAL;
  528                                 CAM_SIM_UNLOCK(bus->sim);
  529                                 break;
  530                         }
  531                         /* Ensure all of our fields are correct */
  532                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
  533                                       inccb->ccb_h.pinfo.priority);
  534                         xpt_merge_ccb(&ccb, inccb);
  535                         ccb.ccb_h.cbfcnp = xptdone;
  536                         xpt_action(&ccb);
  537                         CAM_SIM_UNLOCK(bus->sim);
  538                         bcopy(&ccb, inccb, sizeof(union ccb));
  539                         xpt_free_path(ccb.ccb_h.path);
  540                         break;
  541 
  542                 }
  543                 case XPT_DEV_MATCH: {
  544                         struct cam_periph_map_info mapinfo;
  545                         struct cam_path *old_path;
  546 
  547                         /*
  548                          * We can't deal with physical addresses for this
  549                          * type of transaction.
  550                          */
  551                         if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
  552                                 error = EINVAL;
  553                                 break;
  554                         }
  555 
  556                         /*
  557                          * Save this in case the caller had it set to
  558                          * something in particular.
  559                          */
  560                         old_path = inccb->ccb_h.path;
  561 
  562                         /*
  563                          * We really don't need a path for the matching
  564                          * code.  The path is needed because of the
  565                          * debugging statements in xpt_action().  They
  566                          * assume that the CCB has a valid path.
  567                          */
  568                         inccb->ccb_h.path = xpt_periph->path;
  569 
  570                         bzero(&mapinfo, sizeof(mapinfo));
  571 
  572                         /*
  573                          * Map the pattern and match buffers into kernel
  574                          * virtual address space.
  575                          */
  576                         error = cam_periph_mapmem(inccb, &mapinfo);
  577 
  578                         if (error) {
  579                                 inccb->ccb_h.path = old_path;
  580                                 break;
  581                         }
  582 
  583                         /*
  584                          * This is an immediate CCB, we can send it on directly.
  585                          */
  586                         xpt_action(inccb);
  587 
  588                         /*
  589                          * Map the buffers back into user space.
  590                          */
  591                         cam_periph_unmapmem(inccb, &mapinfo);
  592 
  593                         inccb->ccb_h.path = old_path;
  594 
  595                         error = 0;
  596                         break;
  597                 }
  598                 default:
  599                         error = ENOTSUP;
  600                         break;
  601                 }
  602                 xpt_release_bus(bus);
  603                 break;
  604         }
   605         /*
   606          * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
   607          * input, with the peripheral driver name and unit number filled
   608          * in.  The other fields don't really matter as input.  The
   609          * passthrough driver name ("pass") and unit number are passed
   610          * back in the ccb, along with the current device generation
   611          * number, the index into the device's peripheral driver list,
   612          * and the status.  Note that since we do everything in one
   613          * pass, unlike the XPT_GDEVLIST ccb, we never return a status
   614          * of CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be)
   615          * impossible for the device peripheral driver list to change,
   616          * since we look at the whole thing in one pass and we do it
   617          * with lock protection.
   618          */
  619         case CAMGETPASSTHRU: {
  620                 union ccb *ccb;
  621                 struct cam_periph *periph;
  622                 struct periph_driver **p_drv;
  623                 char   *name;
  624                 u_int unit;
  625                 u_int cur_generation;
  626                 int base_periph_found;
  627                 int splbreaknum;
  628 
  629                 ccb = (union ccb *)addr;
  630                 unit = ccb->cgdl.unit_number;
  631                 name = ccb->cgdl.periph_name;
  632                 /*
  633                  * Every 100 devices, we want to drop our lock protection to
  634                  * give the software interrupt handler a chance to run.
  635                  * Most systems won't run into this check, but this should
  636                  * avoid starvation in the software interrupt handler in
  637                  * large systems.
  638                  */
  639                 splbreaknum = 100;
  640 
  641                 ccb = (union ccb *)addr;
  642 
  643                 base_periph_found = 0;
  644 
  645                 /*
  646                  * Sanity check -- make sure we don't get a null peripheral
  647                  * driver name.
  648                  */
  649                 if (*ccb->cgdl.periph_name == '\0') {
  650                         error = EINVAL;
  651                         break;
  652                 }
  653 
  654                 /* Keep the list from changing while we traverse it */
  655                 mtx_lock(&xsoftc.xpt_topo_lock);
  656 ptstartover:
  657                 cur_generation = xsoftc.xpt_generation;
  658 
  659                 /* first find our driver in the list of drivers */
  660                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
  661                         if (strcmp((*p_drv)->driver_name, name) == 0)
  662                                 break;
  663 
  664                 if (*p_drv == NULL) {
  665                         mtx_unlock(&xsoftc.xpt_topo_lock);
  666                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  667                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  668                         *ccb->cgdl.periph_name = '\0';
  669                         ccb->cgdl.unit_number = 0;
  670                         error = ENOENT;
  671                         break;
  672                 }
  673 
  674                 /*
  675                  * Run through every peripheral instance of this driver
  676                  * and check to see whether it matches the unit passed
  677                  * in by the user.  If it does, get out of the loops and
  678                  * find the passthrough driver associated with that
  679                  * peripheral driver.
  680                  */
  681                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
  682                      periph = TAILQ_NEXT(periph, unit_links)) {
  683 
  684                         if (periph->unit_number == unit) {
  685                                 break;
  686                         } else if (--splbreaknum == 0) {
  687                                 mtx_unlock(&xsoftc.xpt_topo_lock);
  688                                 mtx_lock(&xsoftc.xpt_topo_lock);
  689                                 splbreaknum = 100;
  690                                 if (cur_generation != xsoftc.xpt_generation)
  691                                        goto ptstartover;
  692                         }
  693                 }
  694                 /*
  695                  * If we found the peripheral driver that the user passed
  696                  * in, go through all of the peripheral drivers for that
  697                  * particular device and look for a passthrough driver.
  698                  */
  699                 if (periph != NULL) {
  700                         struct cam_ed *device;
  701                         int i;
  702 
  703                         base_periph_found = 1;
  704                         device = periph->path->device;
  705                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
  706                              periph != NULL;
  707                              periph = SLIST_NEXT(periph, periph_links), i++) {
  708                                 /*
  709                                  * Check to see whether we have a
  710                                  * passthrough device or not.
  711                                  */
  712                                 if (strcmp(periph->periph_name, "pass") == 0) {
  713                                         /*
  714                                          * Fill in the getdevlist fields.
  715                                          */
  716                                         strcpy(ccb->cgdl.periph_name,
  717                                                periph->periph_name);
  718                                         ccb->cgdl.unit_number =
  719                                                 periph->unit_number;
  720                                         if (SLIST_NEXT(periph, periph_links))
  721                                                 ccb->cgdl.status =
  722                                                         CAM_GDEVLIST_MORE_DEVS;
  723                                         else
  724                                                 ccb->cgdl.status =
  725                                                        CAM_GDEVLIST_LAST_DEVICE;
  726                                         ccb->cgdl.generation =
  727                                                 device->generation;
  728                                         ccb->cgdl.index = i;
  729                                         /*
  730                                          * Fill in some CCB header fields
  731                                          * that the user may want.
  732                                          */
  733                                         ccb->ccb_h.path_id =
  734                                                 periph->path->bus->path_id;
  735                                         ccb->ccb_h.target_id =
  736                                                 periph->path->target->target_id;
  737                                         ccb->ccb_h.target_lun =
  738                                                 periph->path->device->lun_id;
  739                                         ccb->ccb_h.status = CAM_REQ_CMP;
  740                                         break;
  741                                 }
  742                         }
  743                 }
  744 
  745                 /*
  746                  * If the periph is null here, one of two things has
  747                  * happened.  The first possibility is that we couldn't
  748                  * find the unit number of the particular peripheral driver
  749                  * that the user is asking about.  e.g. the user asks for
  750                  * the passthrough driver for "da11".  We find the list of
  751                  * "da" peripherals all right, but there is no unit 11.
  752                  * The other possibility is that we went through the list
  753                  * of peripheral drivers attached to the device structure,
  754                  * but didn't find one with the name "pass".  Either way,
  755                  * we return ENOENT, since we couldn't find something.
  756                  */
  757                 if (periph == NULL) {
  758                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  759                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  760                         *ccb->cgdl.periph_name = '\0';
  761                         ccb->cgdl.unit_number = 0;
  762                         error = ENOENT;
  763                         /*
  764                          * It is unfortunate that this is even necessary,
  765                          * but there are many, many clueless users out there.
  766                          * If this is true, the user is looking for the
  767                          * passthrough driver, but doesn't have one in his
  768                          * kernel.
  769                          */
  770                         if (base_periph_found == 1) {
  771                                 printf("xptioctl: pass driver is not in the "
  772                                        "kernel\n");
  773                                 printf("xptioctl: put \"device pass\" in "
  774                                        "your kernel config file\n");
  775                         }
  776                 }
  777                 mtx_unlock(&xsoftc.xpt_topo_lock);
  778                 break;
  779                 }
  780         default:
  781                 error = ENOTTY;
  782                 break;
  783         }
  784 
  785         return(error);
  786 }
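/*
 * Editorial sketch (not in the original file): minimal userland use of
 * the CAMGETPASSTHRU ioctl handled above -- given a peripheral name and
 * unit ("da", 0), ask the XPT which passN device fronts it.  This is
 * essentially what libcam's cam_open_device() does; error reporting is
 * abbreviated, and the CAMGETPASSTHRU macro comes with the CAM headers.
 */
#if 0   /* illustration only */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>

static int
find_pass_unit(const char *periph_name, int unit)
{
        union ccb ccb;
        int fd;

        if ((fd = open("/dev/xpt0", O_RDWR)) == -1)   /* xptopen() insists on R+W */
                return (-1);
        memset(&ccb, 0, sizeof(ccb));
        ccb.ccb_h.func_code = XPT_GDEVLIST;
        strlcpy(ccb.cgdl.periph_name, periph_name,
            sizeof(ccb.cgdl.periph_name));
        ccb.cgdl.unit_number = unit;
        if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1 ||
            ccb.cgdl.status == CAM_GDEVLIST_ERROR) {
                close(fd);
                return (-1);
        }
        close(fd);
        printf("%s%d\n", ccb.cgdl.periph_name, ccb.cgdl.unit_number);
        return ((int)ccb.cgdl.unit_number);
}
#endif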
  787 
  788 static int
  789 cam_module_event_handler(module_t mod, int what, void *arg)
  790 {
  791         int error;
  792 
  793         switch (what) {
  794         case MOD_LOAD:
  795                 if ((error = xpt_init(NULL)) != 0)
  796                         return (error);
  797                 break;
  798         case MOD_UNLOAD:
  799                 return EBUSY;
  800         default:
  801                 return EOPNOTSUPP;
  802         }
  803 
  804         return 0;
  805 }
  806 
  807 static void
  808 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
  809 {
  810 
  811         if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
  812                 xpt_free_path(done_ccb->ccb_h.path);
  813                 xpt_free_ccb(done_ccb);
  814         } else {
  815                 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
  816                 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
  817         }
  818         xpt_release_boot();
  819 }
  820 
  821 /* thread to handle bus rescans */
  822 static void
  823 xpt_scanner_thread(void *dummy)
  824 {
  825         union ccb       *ccb;
  826         struct cam_sim  *sim;
  827 
  828         xpt_lock_buses();
  829         for (;;) {
  830                 if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
  831                         msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
  832                                "ccb_scanq", 0);
  833                 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
  834                         TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  835                         xpt_unlock_buses();
  836 
  837                         sim = ccb->ccb_h.path->bus->sim;
  838                         CAM_SIM_LOCK(sim);
  839                         xpt_action(ccb);
  840                         CAM_SIM_UNLOCK(sim);
  841 
  842                         xpt_lock_buses();
  843                 }
  844         }
  845 }
  846 
  847 void
  848 xpt_rescan(union ccb *ccb)
  849 {
  850         struct ccb_hdr *hdr;
  851 
  852         /* Prepare request */
  853         if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
  854             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
  855                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
  856         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
  857             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
  858                 ccb->ccb_h.func_code = XPT_SCAN_TGT;
  859         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
  860             ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
  861                 ccb->ccb_h.func_code = XPT_SCAN_LUN;
  862         else {
  863                 xpt_print(ccb->ccb_h.path, "illegal scan path\n");
  864                 xpt_free_path(ccb->ccb_h.path);
  865                 xpt_free_ccb(ccb);
  866                 return;
  867         }
  868         ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
  869         ccb->ccb_h.cbfcnp = xpt_rescan_done;
  870         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
  871         /* Don't make duplicate entries for the same paths. */
  872         xpt_lock_buses();
  873         if (ccb->ccb_h.ppriv_ptr1 == NULL) {
  874                 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
  875                         if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
  876                                 wakeup(&xsoftc.ccb_scanq);
  877                                 xpt_unlock_buses();
  878                                 xpt_print(ccb->ccb_h.path, "rescan already queued\n");
  879                                 xpt_free_path(ccb->ccb_h.path);
  880                                 xpt_free_ccb(ccb);
  881                                 return;
  882                         }
  883                 }
  884         }
  885         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  886         xsoftc.buses_to_config++;
  887         wakeup(&xsoftc.ccb_scanq);
  888         xpt_unlock_buses();
  889 }
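/*
 * Editorial sketch (not in the original file): how a SIM or peripheral
 * driver typically hands work to the scanner thread above.  Note that
 * xpt_rescan() consumes the CCB and path on every return, so the caller
 * cleans up only when it fails to build them.  "example_rescan_bus" is
 * a hypothetical helper name.
 */
#if 0   /* illustration only */
static void
example_rescan_bus(path_id_t path_id)
{
        union ccb *ccb;

        ccb = xpt_alloc_ccb_nowait();
        if (ccb == NULL)
                return;
        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                xpt_free_ccb(ccb);
                return;
        }
        xpt_rescan(ccb);        /* wildcard target+lun => XPT_SCAN_BUS */
}
#endif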
  890 
  891 /* Functions accessed by the peripheral drivers */
  892 static int
  893 xpt_init(void *dummy)
  894 {
  895         struct cam_sim *xpt_sim;
  896         struct cam_path *path;
  897         struct cam_devq *devq;
  898         cam_status status;
  899 
  900         TAILQ_INIT(&xsoftc.xpt_busses);
  901         TAILQ_INIT(&cam_simq);
  902         TAILQ_INIT(&xsoftc.ccb_scanq);
  903         STAILQ_INIT(&xsoftc.highpowerq);
  904         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
  905 
  906         mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
  907         mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
  908         mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
  909 
  910         /*
   911          * The xpt layer is, itself, the equivalent of a SIM.
  912          * Allow 16 ccbs in the ccb pool for it.  This should
  913          * give decent parallelism when we probe busses and
  914          * perform other XPT functions.
  915          */
  916         devq = cam_simq_alloc(16);
  917         xpt_sim = cam_sim_alloc(xptaction,
  918                                 xptpoll,
  919                                 "xpt",
  920                                 /*softc*/NULL,
  921                                 /*unit*/0,
  922                                 /*mtx*/&xsoftc.xpt_lock,
  923                                 /*max_dev_transactions*/0,
  924                                 /*max_tagged_dev_transactions*/0,
  925                                 devq);
  926         if (xpt_sim == NULL)
  927                 return (ENOMEM);
  928 
  929         mtx_lock(&xsoftc.xpt_lock);
  930         if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
  931                 mtx_unlock(&xsoftc.xpt_lock);
  932                 printf("xpt_init: xpt_bus_register failed with status %#x,"
  933                        " failing attach\n", status);
  934                 return (EINVAL);
  935         }
  936 
  937         /*
  938          * Looking at the XPT from the SIM layer, the XPT is
   939          * the equivalent of a peripheral driver.  Allocate
  940          * a peripheral driver entry for us.
  941          */
  942         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
  943                                       CAM_TARGET_WILDCARD,
  944                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
  945                 mtx_unlock(&xsoftc.xpt_lock);
  946                 printf("xpt_init: xpt_create_path failed with status %#x,"
  947                        " failing attach\n", status);
  948                 return (EINVAL);
  949         }
  950 
  951         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
  952                          path, NULL, 0, xpt_sim);
  953         xpt_free_path(path);
  954         mtx_unlock(&xsoftc.xpt_lock);
  955         /* Install our software interrupt handlers */
  956         swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
  957         /*
  958          * Register a callback for when interrupts are enabled.
  959          */
  960         xsoftc.xpt_config_hook =
  961             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
  962                                               M_CAMXPT, M_NOWAIT | M_ZERO);
  963         if (xsoftc.xpt_config_hook == NULL) {
  964                 printf("xpt_init: Cannot malloc config hook "
  965                        "- failing attach\n");
  966                 return (ENOMEM);
  967         }
  968         xsoftc.xpt_config_hook->ich_func = xpt_config;
  969         if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
  970                 free (xsoftc.xpt_config_hook, M_CAMXPT);
  971                 printf("xpt_init: config_intrhook_establish failed "
  972                        "- failing attach\n");
  973         }
  974 
  975         return (0);
  976 }
  977 
  978 static cam_status
  979 xptregister(struct cam_periph *periph, void *arg)
  980 {
  981         struct cam_sim *xpt_sim;
  982 
  983         if (periph == NULL) {
  984                 printf("xptregister: periph was NULL!!\n");
  985                 return(CAM_REQ_CMP_ERR);
  986         }
  987 
  988         xpt_sim = (struct cam_sim *)arg;
  989         xpt_sim->softc = periph;
  990         xpt_periph = periph;
  991         periph->softc = NULL;
  992 
  993         return(CAM_REQ_CMP);
  994 }
  995 
  996 int32_t
  997 xpt_add_periph(struct cam_periph *periph)
  998 {
  999         struct cam_ed *device;
 1000         int32_t  status;
 1001         struct periph_list *periph_head;
 1002 
 1003         mtx_assert(periph->sim->mtx, MA_OWNED);
 1004 
 1005         device = periph->path->device;
 1006 
 1007         periph_head = &device->periphs;
 1008 
 1009         status = CAM_REQ_CMP;
 1010 
 1011         if (device != NULL) {
 1012                 /*
 1013                  * Make room for this peripheral
 1014                  * so it will fit in the queue
 1015                  * when it's scheduled to run
 1016                  */
 1017                 status = camq_resize(&device->drvq,
 1018                                      device->drvq.array_size + 1);
 1019 
 1020                 device->generation++;
 1021 
 1022                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
 1023         }
 1024 
 1025         mtx_lock(&xsoftc.xpt_topo_lock);
 1026         xsoftc.xpt_generation++;
 1027         mtx_unlock(&xsoftc.xpt_topo_lock);
 1028 
 1029         return (status);
 1030 }
 1031 
 1032 void
 1033 xpt_remove_periph(struct cam_periph *periph)
 1034 {
 1035         struct cam_ed *device;
 1036 
 1037         mtx_assert(periph->sim->mtx, MA_OWNED);
 1038 
 1039         device = periph->path->device;
 1040 
 1041         if (device != NULL) {
 1042                 struct periph_list *periph_head;
 1043 
 1044                 periph_head = &device->periphs;
 1045 
 1046                 /* Release the slot for this peripheral */
 1047                 camq_resize(&device->drvq, device->drvq.array_size - 1);
 1048 
 1049                 device->generation++;
 1050 
 1051                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
 1052         }
 1053 
 1054         mtx_lock(&xsoftc.xpt_topo_lock);
 1055         xsoftc.xpt_generation++;
 1056         mtx_unlock(&xsoftc.xpt_topo_lock);
 1057 }
 1058 
 1059 
 1060 void
 1061 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1062 {
 1063         struct  cam_path *path = periph->path;
 1064 
 1065         mtx_assert(periph->sim->mtx, MA_OWNED);
 1066 
 1067         printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n",
 1068                periph->periph_name, periph->unit_number,
 1069                path->bus->sim->sim_name,
 1070                path->bus->sim->unit_number,
 1071                path->bus->sim->bus_id,
 1072                path->bus->path_id,
 1073                path->target->target_id,
 1074                path->device->lun_id);
 1075         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1076         if (path->device->protocol == PROTO_SCSI)
 1077                 scsi_print_inquiry(&path->device->inq_data);
 1078         else if (path->device->protocol == PROTO_ATA ||
 1079             path->device->protocol == PROTO_SATAPM)
 1080                 ata_print_ident(&path->device->ident_data);
 1081         else
 1082                 printf("Unknown protocol device\n");
 1083         if (bootverbose && path->device->serial_num_len > 0) {
  1084                 /* Don't wrap the screen - print only the first 60 chars */
 1085                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1086                        periph->unit_number, path->device->serial_num);
 1087         }
 1088         /* Announce transport details. */
 1089         (*(path->bus->xport->announce))(periph);
 1090         /* Announce command queueing. */
 1091         if (path->device->inq_flags & SID_CmdQue
 1092          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1093                 printf("%s%d: Command Queueing enabled\n",
 1094                        periph->periph_name, periph->unit_number);
 1095         }
  1096         /* Announce caller's details if any were passed in. */
 1097         if (announce_string != NULL)
 1098                 printf("%s%d: %s\n", periph->periph_name,
 1099                        periph->unit_number, announce_string);
 1100 }
 1101 
 1102 static dev_match_ret
 1103 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1104             struct cam_eb *bus)
 1105 {
 1106         dev_match_ret retval;
 1107         int i;
 1108 
 1109         retval = DM_RET_NONE;
 1110 
 1111         /*
 1112          * If we aren't given something to match against, that's an error.
 1113          */
 1114         if (bus == NULL)
 1115                 return(DM_RET_ERROR);
 1116 
 1117         /*
 1118          * If there are no match entries, then this bus matches no
 1119          * matter what.
 1120          */
 1121         if ((patterns == NULL) || (num_patterns == 0))
 1122                 return(DM_RET_DESCEND | DM_RET_COPY);
 1123 
 1124         for (i = 0; i < num_patterns; i++) {
 1125                 struct bus_match_pattern *cur_pattern;
 1126 
 1127                 /*
 1128                  * If the pattern in question isn't for a bus node, we
 1129                  * aren't interested.  However, we do indicate to the
 1130                  * calling routine that we should continue descending the
 1131                  * tree, since the user wants to match against lower-level
 1132                  * EDT elements.
 1133                  */
 1134                 if (patterns[i].type != DEV_MATCH_BUS) {
 1135                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1136                                 retval |= DM_RET_DESCEND;
 1137                         continue;
 1138                 }
 1139 
 1140                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1141 
 1142                 /*
 1143                  * If they want to match any bus node, we give them any
 1144                  * device node.
 1145                  */
 1146                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1147                         /* set the copy flag */
 1148                         retval |= DM_RET_COPY;
 1149 
 1150                         /*
 1151                          * If we've already decided on an action, go ahead
 1152                          * and return.
 1153                          */
 1154                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1155                                 return(retval);
 1156                 }
 1157 
 1158                 /*
 1159                  * Not sure why someone would do this...
 1160                  */
 1161                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1162                         continue;
 1163 
 1164                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1165                  && (cur_pattern->path_id != bus->path_id))
 1166                         continue;
 1167 
 1168                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1169                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1170                         continue;
 1171 
 1172                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1173                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1174                         continue;
 1175 
 1176                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1177                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1178                              DEV_IDLEN) != 0))
 1179                         continue;
 1180 
 1181                 /*
 1182                  * If we get to this point, the user definitely wants
 1183                  * information on this bus.  So tell the caller to copy the
 1184                  * data out.
 1185                  */
 1186                 retval |= DM_RET_COPY;
 1187 
 1188                 /*
 1189                  * If the return action has been set to descend, then we
 1190                  * know that we've already seen a non-bus matching
 1191                  * expression, therefore we need to further descend the tree.
 1192                  * This won't change by continuing around the loop, so we
 1193                  * go ahead and return.  If we haven't seen a non-bus
 1194                  * matching expression, we keep going around the loop until
 1195                  * we exhaust the matching expressions.  We'll set the stop
 1196                  * flag once we fall out of the loop.
 1197                  */
 1198                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1199                         return(retval);
 1200         }
 1201 
 1202         /*
 1203          * If the return action hasn't been set to descend yet, that means
 1204          * we haven't seen anything other than bus matching patterns.  So
 1205          * tell the caller to stop descending the tree -- the user doesn't
 1206          * want to match against lower level tree elements.
 1207          */
 1208         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1209                 retval |= DM_RET_STOP;
 1210 
 1211         return(retval);
 1212 }
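/*
 * Editorial sketch (not in the original file): the pattern side of the
 * match above, as an XPT_DEV_MATCH caller (e.g. camcontrol(8) devlist)
 * might build it.  Buffer setup for the surrounding ccb_dev_match is
 * omitted; "ahc" is just an example SIM name.
 */
#if 0   /* illustration only */
        struct dev_match_pattern pattern;

        memset(&pattern, 0, sizeof(pattern));
        pattern.type = DEV_MATCH_BUS;
        pattern.pattern.bus_pattern.flags = BUS_MATCH_NAME;
        strlcpy(pattern.pattern.bus_pattern.dev_name, "ahc",
            sizeof(pattern.pattern.bus_pattern.dev_name));
        /* xptbusmatch() now copies out each bus whose SIM is named "ahc". */
#endif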
 1213 
 1214 static dev_match_ret
 1215 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1216                struct cam_ed *device)
 1217 {
 1218         dev_match_ret retval;
 1219         int i;
 1220 
 1221         retval = DM_RET_NONE;
 1222 
 1223         /*
 1224          * If we aren't given something to match against, that's an error.
 1225          */
 1226         if (device == NULL)
 1227                 return(DM_RET_ERROR);
 1228 
 1229         /*
 1230          * If there are no match entries, then this device matches no
 1231          * matter what.
 1232          */
 1233         if ((patterns == NULL) || (num_patterns == 0))
 1234                 return(DM_RET_DESCEND | DM_RET_COPY);
 1235 
 1236         for (i = 0; i < num_patterns; i++) {
 1237                 struct device_match_pattern *cur_pattern;
 1238 
 1239                 /*
 1240                  * If the pattern in question isn't for a device node, we
 1241                  * aren't interested.
 1242                  */
 1243                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1244                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1245                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1246                                 retval |= DM_RET_DESCEND;
 1247                         continue;
 1248                 }
 1249 
 1250                 cur_pattern = &patterns[i].pattern.device_pattern;
 1251 
 1252                 /*
 1253                  * If they want to match any device node, we give them any
 1254                  * device node.
 1255                  */
 1256                 if (cur_pattern->flags == DEV_MATCH_ANY) {
 1257                         /* set the copy flag */
 1258                         retval |= DM_RET_COPY;
 1259 
 1260 
 1261                         /*
 1262                          * If we've already decided on an action, go ahead
 1263                          * and return.
 1264                          */
 1265                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1266                                 return(retval);
 1267                 }
 1268 
 1269                 /*
 1270                  * Not sure why someone would do this...
 1271                  */
 1272                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1273                         continue;
 1274 
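                       /*
                        * Each field the user asked to match on (path, target,
                        * lun, inquiry data) must agree with this device; the
                        * first mismatch disqualifies this pattern.
                        */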
 1275                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1276                  && (cur_pattern->path_id != device->target->bus->path_id))
 1277                         continue;
 1278 
 1279                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1280                  && (cur_pattern->target_id != device->target->target_id))
 1281                         continue;
 1282 
 1283                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1284                  && (cur_pattern->target_lun != device->lun_id))
 1285                         continue;
 1286 
 1287                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1288                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1289                                     (caddr_t)&cur_pattern->inq_pat,
 1290                                     1, sizeof(cur_pattern->inq_pat),
 1291                                     scsi_static_inquiry_match) == NULL))
 1292                         continue;
 1293 
 1294                 /*
 1295                  * If we get to this point, the user definitely wants
 1296                  * information on this device.  So tell the caller to copy
 1297                  * the data out.
 1298                  */
 1299                 retval |= DM_RET_COPY;
 1300 
 1301                 /*
 1302                  * If the return action has been set to descend, then we
 1303                  * know that we've already seen a peripheral matching
 1304                  * expression, therefore we need to further descend the tree.
 1305                  * This won't change by continuing around the loop, so we
 1306                  * go ahead and return.  If we haven't seen a peripheral
 1307                  * matching expression, we keep going around the loop until
 1308                  * we exhaust the matching expressions.  We'll set the stop
 1309                  * flag once we fall out of the loop.
 1310                  */
 1311                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1312                         return(retval);
 1313         }
 1314 
 1315         /*
 1316          * If the return action hasn't been set to descend yet, that means
 1317          * we haven't seen any peripheral matching patterns.  So tell the
 1318          * caller to stop descending the tree -- the user doesn't want to
 1319          * match against lower level tree elements.
 1320          */
 1321         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1322                 retval |= DM_RET_STOP;
 1323 
 1324         return(retval);
 1325 }
 1326 
 1327 /*
 1328  * Match a single peripheral against any number of match patterns.
 1329  */
 1330 static dev_match_ret
 1331 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1332                struct cam_periph *periph)
 1333 {
 1334         dev_match_ret retval;
 1335         int i;
 1336 
 1337         /*
 1338          * If we aren't given something to match against, that's an error.
 1339          */
 1340         if (periph == NULL)
 1341                 return(DM_RET_ERROR);
 1342 
 1343         /*
 1344          * If there are no match entries, then this peripheral matches no
 1345          * matter what.
 1346          */
 1347         if ((patterns == NULL) || (num_patterns == 0))
 1348                 return(DM_RET_STOP | DM_RET_COPY);
 1349 
 1350         /*
 1351          * There aren't any nodes below a peripheral node, so there's no
 1352          * reason to descend the tree any further.
 1353          */
 1354         retval = DM_RET_STOP;
 1355 
 1356         for (i = 0; i < num_patterns; i++) {
 1357                 struct periph_match_pattern *cur_pattern;
 1358 
 1359                 /*
 1360                  * If the pattern in question isn't for a peripheral, we
 1361                  * aren't interested.
 1362                  */
 1363                 if (patterns[i].type != DEV_MATCH_PERIPH)
 1364                         continue;
 1365 
 1366                 cur_pattern = &patterns[i].pattern.periph_pattern;
 1367 
 1368                 /*
 1369                  * If they want to match on anything, then we will do so.
 1370                  */
 1371                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 1372                         /* set the copy flag */
 1373                         retval |= DM_RET_COPY;
 1374 
 1375                         /*
 1376                          * We've already set the return action to stop,
 1377                          * since there are no nodes below peripherals in
 1378                          * the tree.
 1379                          */
 1380                         return(retval);
 1381                 }
 1382 
  1383                 /*
  1384                  * A pattern with no match bits set matches nothing; skip it.
  1385                  */
 1386                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 1387                         continue;
 1388 
 1389                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 1390                  && (cur_pattern->path_id != periph->path->bus->path_id))
 1391                         continue;
 1392 
 1393                 /*
  1394                  * For the target and lun IDs, we have to make sure the
 1395                  * target and lun pointers aren't NULL.  The xpt peripheral
 1396                  * has a wildcard target and device.
 1397                  */
 1398                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 1399                  && ((periph->path->target == NULL)
 1400                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 1401                         continue;
 1402 
 1403                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 1404                  && ((periph->path->device == NULL)
 1405                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 1406                         continue;
 1407 
 1408                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 1409                  && (cur_pattern->unit_number != periph->unit_number))
 1410                         continue;
 1411 
 1412                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 1413                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 1414                              DEV_IDLEN) != 0))
 1415                         continue;
 1416 
 1417                 /*
 1418                  * If we get to this point, the user definitely wants
 1419                  * information on this peripheral.  So tell the caller to
 1420                  * copy the data out.
 1421                  */
 1422                 retval |= DM_RET_COPY;
 1423 
 1424                 /*
 1425                  * The return action has already been set to stop, since
 1426                  * peripherals don't have any nodes below them in the EDT.
 1427                  */
 1428                 return(retval);
 1429         }
 1430 
 1431         /*
 1432          * If we get to this point, the peripheral that was passed in
 1433          * doesn't match any of the patterns.
 1434          */
 1435         return(retval);
 1436 }
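
/*
 * The xptedt*func() callbacks below walk the EDT on behalf of an
 * XPT_DEV_MATCH CCB.  cdm->pos records a position (cookies plus
 * generation counts for each level of the tree) so that a traversal that
 * ran out of buffer space can be resumed; the generation checks detect
 * whether a list changed between calls, in which case the user must
 * start over.
 */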
 1437 
 1438 static int
 1439 xptedtbusfunc(struct cam_eb *bus, void *arg)
 1440 {
 1441         struct ccb_dev_match *cdm;
 1442         dev_match_ret retval;
 1443 
 1444         cdm = (struct ccb_dev_match *)arg;
 1445 
 1446         /*
 1447          * If our position is for something deeper in the tree, that means
 1448          * that we've already seen this node.  So, we keep going down.
 1449          */
 1450         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1451          && (cdm->pos.cookie.bus == bus)
 1452          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1453          && (cdm->pos.cookie.target != NULL))
 1454                 retval = DM_RET_DESCEND;
 1455         else
 1456                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 1457 
 1458         /*
 1459          * If we got an error, bail out of the search.
 1460          */
 1461         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1462                 cdm->status = CAM_DEV_MATCH_ERROR;
 1463                 return(0);
 1464         }
 1465 
 1466         /*
 1467          * If the copy flag is set, copy this bus out.
 1468          */
 1469         if (retval & DM_RET_COPY) {
 1470                 int spaceleft, j;
 1471 
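                       /*
                        * spaceleft is a signed int but is compared against an
                        * unsigned sizeof() below, so this logic assumes the
                        * caller supplied a buffer of at least one
                        * dev_match_result; a negative spaceleft would convert
                        * to a large unsigned value and slip past the check.
                        */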
 1472                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1473                         sizeof(struct dev_match_result));
 1474 
 1475                 /*
 1476                  * If we don't have enough space to put in another
 1477                  * match result, save our position and tell the
 1478                  * user there are more devices to check.
 1479                  */
 1480                 if (spaceleft < sizeof(struct dev_match_result)) {
 1481                         bzero(&cdm->pos, sizeof(cdm->pos));
 1482                         cdm->pos.position_type =
 1483                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 1484 
 1485                         cdm->pos.cookie.bus = bus;
  1486                         cdm->pos.generations[CAM_BUS_GENERATION] =
 1487                                 xsoftc.bus_generation;
 1488                         cdm->status = CAM_DEV_MATCH_MORE;
 1489                         return(0);
 1490                 }
 1491                 j = cdm->num_matches;
 1492                 cdm->num_matches++;
 1493                 cdm->matches[j].type = DEV_MATCH_BUS;
 1494                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 1495                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 1496                 cdm->matches[j].result.bus_result.unit_number =
 1497                         bus->sim->unit_number;
 1498                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 1499                         bus->sim->sim_name, DEV_IDLEN);
 1500         }
 1501 
 1502         /*
 1503          * If the user is only interested in busses, there's no
 1504          * reason to descend to the next level in the tree.
 1505          */
 1506         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1507                 return(1);
 1508 
 1509         /*
 1510          * If there is a target generation recorded, check it to
 1511          * make sure the target list hasn't changed.
 1512          */
 1513         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1514          && (bus == cdm->pos.cookie.bus)
 1515          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1516          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 1517          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 1518              bus->generation)) {
 1519                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1520                 return(0);
 1521         }
 1522 
 1523         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1524          && (cdm->pos.cookie.bus == bus)
 1525          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1526          && (cdm->pos.cookie.target != NULL))
 1527                 return(xpttargettraverse(bus,
 1528                                         (struct cam_et *)cdm->pos.cookie.target,
 1529                                          xptedttargetfunc, arg));
 1530         else
 1531                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 1532 }
 1533 
 1534 static int
 1535 xptedttargetfunc(struct cam_et *target, void *arg)
 1536 {
 1537         struct ccb_dev_match *cdm;
 1538 
 1539         cdm = (struct ccb_dev_match *)arg;
 1540 
 1541         /*
 1542          * If there is a device list generation recorded, check it to
 1543          * make sure the device list hasn't changed.
 1544          */
 1545         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1546          && (cdm->pos.cookie.bus == target->bus)
 1547          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1548          && (cdm->pos.cookie.target == target)
 1549          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1550          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 1551          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 1552              target->generation)) {
 1553                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1554                 return(0);
 1555         }
 1556 
 1557         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1558          && (cdm->pos.cookie.bus == target->bus)
 1559          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1560          && (cdm->pos.cookie.target == target)
 1561          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1562          && (cdm->pos.cookie.device != NULL))
 1563                 return(xptdevicetraverse(target,
 1564                                         (struct cam_ed *)cdm->pos.cookie.device,
 1565                                          xptedtdevicefunc, arg));
 1566         else
 1567                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 1568 }
 1569 
 1570 static int
 1571 xptedtdevicefunc(struct cam_ed *device, void *arg)
 1572 {
 1574         struct ccb_dev_match *cdm;
 1575         dev_match_ret retval;
 1576 
 1577         cdm = (struct ccb_dev_match *)arg;
 1578 
 1579         /*
 1580          * If our position is for something deeper in the tree, that means
 1581          * that we've already seen this node.  So, we keep going down.
 1582          */
 1583         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1584          && (cdm->pos.cookie.device == device)
 1585          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1586          && (cdm->pos.cookie.periph != NULL))
 1587                 retval = DM_RET_DESCEND;
 1588         else
 1589                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 1590                                         device);
 1591 
 1592         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1593                 cdm->status = CAM_DEV_MATCH_ERROR;
 1594                 return(0);
 1595         }
 1596 
 1597         /*
 1598          * If the copy flag is set, copy this device out.
 1599          */
 1600         if (retval & DM_RET_COPY) {
 1601                 int spaceleft, j;
 1602 
 1603                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1604                         sizeof(struct dev_match_result));
 1605 
 1606                 /*
 1607                  * If we don't have enough space to put in another
 1608                  * match result, save our position and tell the
 1609                  * user there are more devices to check.
 1610                  */
 1611                 if (spaceleft < sizeof(struct dev_match_result)) {
 1612                         bzero(&cdm->pos, sizeof(cdm->pos));
 1613                         cdm->pos.position_type =
 1614                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1615                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 1616 
 1617                         cdm->pos.cookie.bus = device->target->bus;
  1618                         cdm->pos.generations[CAM_BUS_GENERATION] =
 1619                                 xsoftc.bus_generation;
 1620                         cdm->pos.cookie.target = device->target;
 1621                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1622                                 device->target->bus->generation;
 1623                         cdm->pos.cookie.device = device;
 1624                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1625                                 device->target->generation;
 1626                         cdm->status = CAM_DEV_MATCH_MORE;
 1627                         return(0);
 1628                 }
 1629                 j = cdm->num_matches;
 1630                 cdm->num_matches++;
 1631                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 1632                 cdm->matches[j].result.device_result.path_id =
 1633                         device->target->bus->path_id;
 1634                 cdm->matches[j].result.device_result.target_id =
 1635                         device->target->target_id;
 1636                 cdm->matches[j].result.device_result.target_lun =
 1637                         device->lun_id;
 1638                 cdm->matches[j].result.device_result.protocol =
 1639                         device->protocol;
 1640                 bcopy(&device->inq_data,
 1641                       &cdm->matches[j].result.device_result.inq_data,
 1642                       sizeof(struct scsi_inquiry_data));
 1643                 bcopy(&device->ident_data,
 1644                       &cdm->matches[j].result.device_result.ident_data,
 1645                       sizeof(struct ata_params));
 1646 
 1647                 /* Let the user know whether this device is unconfigured */
 1648                 if (device->flags & CAM_DEV_UNCONFIGURED)
 1649                         cdm->matches[j].result.device_result.flags =
 1650                                 DEV_RESULT_UNCONFIGURED;
 1651                 else
 1652                         cdm->matches[j].result.device_result.flags =
 1653                                 DEV_RESULT_NOFLAG;
 1654         }
 1655 
 1656         /*
 1657          * If the user isn't interested in peripherals, don't descend
 1658          * the tree any further.
 1659          */
 1660         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1661                 return(1);
 1662 
 1663         /*
 1664          * If there is a peripheral list generation recorded, make sure
 1665          * it hasn't changed.
 1666          */
 1667         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1668          && (device->target->bus == cdm->pos.cookie.bus)
 1669          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1670          && (device->target == cdm->pos.cookie.target)
 1671          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1672          && (device == cdm->pos.cookie.device)
 1673          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1674          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 1675          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1676              device->generation)){
 1677                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1678                 return(0);
 1679         }
 1680 
 1681         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1682          && (cdm->pos.cookie.bus == device->target->bus)
 1683          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1684          && (cdm->pos.cookie.target == device->target)
 1685          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1686          && (cdm->pos.cookie.device == device)
 1687          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1688          && (cdm->pos.cookie.periph != NULL))
 1689                 return(xptperiphtraverse(device,
 1690                                 (struct cam_periph *)cdm->pos.cookie.periph,
 1691                                 xptedtperiphfunc, arg));
 1692         else
 1693                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 1694 }
 1695 
 1696 static int
 1697 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 1698 {
 1699         struct ccb_dev_match *cdm;
 1700         dev_match_ret retval;
 1701 
 1702         cdm = (struct ccb_dev_match *)arg;
 1703 
 1704         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1705 
 1706         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1707                 cdm->status = CAM_DEV_MATCH_ERROR;
 1708                 return(0);
 1709         }
 1710 
 1711         /*
 1712          * If the copy flag is set, copy this peripheral out.
 1713          */
 1714         if (retval & DM_RET_COPY) {
 1715                 int spaceleft, j;
 1716 
 1717                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1718                         sizeof(struct dev_match_result));
 1719 
 1720                 /*
 1721                  * If we don't have enough space to put in another
 1722                  * match result, save our position and tell the
 1723                  * user there are more devices to check.
 1724                  */
 1725                 if (spaceleft < sizeof(struct dev_match_result)) {
 1726                         bzero(&cdm->pos, sizeof(cdm->pos));
 1727                         cdm->pos.position_type =
 1728                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1729                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 1730                                 CAM_DEV_POS_PERIPH;
 1731 
 1732                         cdm->pos.cookie.bus = periph->path->bus;
  1733                         cdm->pos.generations[CAM_BUS_GENERATION] =
 1734                                 xsoftc.bus_generation;
 1735                         cdm->pos.cookie.target = periph->path->target;
 1736                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1737                                 periph->path->bus->generation;
 1738                         cdm->pos.cookie.device = periph->path->device;
 1739                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1740                                 periph->path->target->generation;
 1741                         cdm->pos.cookie.periph = periph;
 1742                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1743                                 periph->path->device->generation;
 1744                         cdm->status = CAM_DEV_MATCH_MORE;
 1745                         return(0);
 1746                 }
 1747 
 1748                 j = cdm->num_matches;
 1749                 cdm->num_matches++;
 1750                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1751                 cdm->matches[j].result.periph_result.path_id =
 1752                         periph->path->bus->path_id;
 1753                 cdm->matches[j].result.periph_result.target_id =
 1754                         periph->path->target->target_id;
 1755                 cdm->matches[j].result.periph_result.target_lun =
 1756                         periph->path->device->lun_id;
 1757                 cdm->matches[j].result.periph_result.unit_number =
 1758                         periph->unit_number;
 1759                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1760                         periph->periph_name, DEV_IDLEN);
 1761         }
 1762 
 1763         return(1);
 1764 }
 1765 
 1766 static int
 1767 xptedtmatch(struct ccb_dev_match *cdm)
 1768 {
 1769         int ret;
 1770 
 1771         cdm->num_matches = 0;
 1772 
 1773         /*
 1774          * Check the bus list generation.  If it has changed, the user
 1775          * needs to reset everything and start over.
 1776          */
 1777         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1778          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 1779          && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
 1780                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1781                 return(0);
 1782         }
 1783 
 1784         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1785          && (cdm->pos.cookie.bus != NULL))
 1786                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 1787                                      xptedtbusfunc, cdm);
 1788         else
 1789                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 1790 
 1791         /*
 1792          * If we get back 0, that means that we had to stop before fully
 1793          * traversing the EDT.  It also means that one of the subroutines
 1794          * has set the status field to the proper value.  If we get back 1,
 1795          * we've fully traversed the EDT and copied out any matching entries.
 1796          */
 1797         if (ret == 1)
 1798                 cdm->status = CAM_DEV_MATCH_LAST;
 1799 
 1800         return(ret);
 1801 }
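
/*
 * For reference, a userland consumer drives the traversal above through
 * the XPT_DEV_MATCH CCB; camcontrol(8) does roughly the following via the
 * CAMIOCOMMAND ioctl on the xpt device (a sketch, not taken from this
 * file -- see cam(4) for the authoritative interface):
 *
 *	union ccb ccb;
 *	struct dev_match_result matches[64];
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.cdm.match_buf_len = sizeof(matches);
 *	ccb.cdm.matches = matches;
 *	ccb.cdm.num_patterns = 0;	(no patterns: match everything)
 *	ccb.cdm.patterns = NULL;
 *	do {
 *		ioctl(fd, CAMIOCOMMAND, &ccb);	(fd from open("/dev/xpt0"))
 *		(consume ccb.cdm.num_matches results from matches[])
 *	} while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
 */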
 1802 
 1803 static int
 1804 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 1805 {
 1806         struct ccb_dev_match *cdm;
 1807 
 1808         cdm = (struct ccb_dev_match *)arg;
 1809 
 1810         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1811          && (cdm->pos.cookie.pdrv == pdrv)
 1812          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1813          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 1814          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1815              (*pdrv)->generation)) {
 1816                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1817                 return(0);
 1818         }
 1819 
 1820         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1821          && (cdm->pos.cookie.pdrv == pdrv)
 1822          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1823          && (cdm->pos.cookie.periph != NULL))
 1824                 return(xptpdperiphtraverse(pdrv,
 1825                                 (struct cam_periph *)cdm->pos.cookie.periph,
 1826                                 xptplistperiphfunc, arg));
 1827         else
  1828                 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
 1829 }
 1830 
 1831 static int
 1832 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 1833 {
 1834         struct ccb_dev_match *cdm;
 1835         dev_match_ret retval;
 1836 
 1837         cdm = (struct ccb_dev_match *)arg;
 1838 
 1839         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1840 
 1841         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1842                 cdm->status = CAM_DEV_MATCH_ERROR;
 1843                 return(0);
 1844         }
 1845 
 1846         /*
 1847          * If the copy flag is set, copy this peripheral out.
 1848          */
 1849         if (retval & DM_RET_COPY) {
 1850                 int spaceleft, j;
 1851 
 1852                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1853                         sizeof(struct dev_match_result));
 1854 
 1855                 /*
 1856                  * If we don't have enough space to put in another
 1857                  * match result, save our position and tell the
 1858                  * user there are more devices to check.
 1859                  */
 1860                 if (spaceleft < sizeof(struct dev_match_result)) {
 1861                         struct periph_driver **pdrv;
 1862 
 1863                         pdrv = NULL;
 1864                         bzero(&cdm->pos, sizeof(cdm->pos));
 1865                         cdm->pos.position_type =
 1866                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 1867                                 CAM_DEV_POS_PERIPH;
 1868 
 1869                         /*
 1870                          * This may look a bit non-sensical, but it is
 1871                          * actually quite logical.  There are very few
 1872                          * peripheral drivers, and bloating every peripheral
 1873                          * structure with a pointer back to its parent
 1874                          * peripheral driver linker set entry would cost
 1875                          * more in the long run than doing this quick lookup.
 1876                          */
 1877                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 1878                                 if (strcmp((*pdrv)->driver_name,
 1879                                     periph->periph_name) == 0)
 1880                                         break;
 1881                         }
 1882 
 1883                         if (*pdrv == NULL) {
 1884                                 cdm->status = CAM_DEV_MATCH_ERROR;
 1885                                 return(0);
 1886                         }
 1887 
 1888                         cdm->pos.cookie.pdrv = pdrv;
 1889                         /*
 1890                          * The periph generation slot does double duty, as
 1891                          * does the periph pointer slot.  They are used for
 1892                          * both edt and pdrv lookups and positioning.
 1893                          */
 1894                         cdm->pos.cookie.periph = periph;
 1895                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1896                                 (*pdrv)->generation;
 1897                         cdm->status = CAM_DEV_MATCH_MORE;
 1898                         return(0);
 1899                 }
 1900 
 1901                 j = cdm->num_matches;
 1902                 cdm->num_matches++;
 1903                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1904                 cdm->matches[j].result.periph_result.path_id =
 1905                         periph->path->bus->path_id;
 1906 
 1907                 /*
 1908                  * The transport layer peripheral doesn't have a target or
 1909                  * lun.
 1910                  */
 1911                 if (periph->path->target)
 1912                         cdm->matches[j].result.periph_result.target_id =
 1913                                 periph->path->target->target_id;
 1914                 else
 1915                         cdm->matches[j].result.periph_result.target_id = -1;
 1916 
 1917                 if (periph->path->device)
 1918                         cdm->matches[j].result.periph_result.target_lun =
 1919                                 periph->path->device->lun_id;
 1920                 else
 1921                         cdm->matches[j].result.periph_result.target_lun = -1;
 1922 
 1923                 cdm->matches[j].result.periph_result.unit_number =
 1924                         periph->unit_number;
 1925                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1926                         periph->periph_name, DEV_IDLEN);
 1927         }
 1928 
 1929         return(1);
 1930 }
 1931 
 1932 static int
 1933 xptperiphlistmatch(struct ccb_dev_match *cdm)
 1934 {
 1935         int ret;
 1936 
 1937         cdm->num_matches = 0;
 1938 
  1939         /*
  1940          * At the corresponding point in the EDT traversal function, we
  1941          * check the bus list generation to make sure that no busses have
  1942          * been added or removed since the user last sent an XPT_DEV_MATCH
  1943          * ccb through.  Here, however, we don't have to worry about new
  1944          * peripheral driver types coming or going; they're in a linker
  1945          * set, and therefore can't change without a recompile.
  1946          */
 1948 
 1949         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1950          && (cdm->pos.cookie.pdrv != NULL))
 1951                 ret = xptpdrvtraverse(
 1952                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 1953                                 xptplistpdrvfunc, cdm);
 1954         else
 1955                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 1956 
 1957         /*
 1958          * If we get back 0, that means that we had to stop before fully
 1959          * traversing the peripheral driver tree.  It also means that one of
 1960          * the subroutines has set the status field to the proper value.  If
  1961          * we get back 1, we've fully traversed the peripheral driver
  1962          * tree and copied out any matching entries.
 1963          */
 1964         if (ret == 1)
 1965                 cdm->status = CAM_DEV_MATCH_LAST;
 1966 
 1967         return(ret);
 1968 }
 1969 
 1970 static int
 1971 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 1972 {
 1973         struct cam_eb *bus, *next_bus;
 1974         int retval;
 1975 
 1976         retval = 1;
 1977 
 1978         mtx_lock(&xsoftc.xpt_topo_lock);
 1979         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
 1980              bus != NULL;
 1981              bus = next_bus) {
 1982 
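                       /*
                        * Take our own reference so the bus cannot be freed
                        * while we drop the topology lock to call the
                        * traversal function.
                        */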
 1983                 bus->refcount++;
 1984 
 1985                 /*
 1986                  * XXX The locking here is obviously very complex.  We
 1987                  * should work to simplify it.
 1988                  */
 1989                 mtx_unlock(&xsoftc.xpt_topo_lock);
 1990                 CAM_SIM_LOCK(bus->sim);
 1991                 retval = tr_func(bus, arg);
 1992                 CAM_SIM_UNLOCK(bus->sim);
 1993 
 1994                 mtx_lock(&xsoftc.xpt_topo_lock);
 1995                 next_bus = TAILQ_NEXT(bus, links);
 1996                 mtx_unlock(&xsoftc.xpt_topo_lock);
 1997 
 1998                 xpt_release_bus(bus);
 1999 
 2000                 if (retval == 0)
 2001                         return(retval);
 2002                 mtx_lock(&xsoftc.xpt_topo_lock);
 2003         }
 2004         mtx_unlock(&xsoftc.xpt_topo_lock);
 2005 
 2006         return(retval);
 2007 }
 2008 
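/*
 * Return 1 if any peripheral reached through the given SIM still holds
 * a reference (i.e. is still in use), 0 otherwise.
 */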
 2009 int
 2010 xpt_sim_opened(struct cam_sim *sim)
 2011 {
 2012         struct cam_eb *bus;
 2013         struct cam_et *target;
 2014         struct cam_ed *device;
 2015         struct cam_periph *periph;
 2016 
 2017         KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
 2018         mtx_assert(sim->mtx, MA_OWNED);
 2019 
 2020         mtx_lock(&xsoftc.xpt_topo_lock);
 2021         TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
 2022                 if (bus->sim != sim)
 2023                         continue;
 2024 
 2025                 TAILQ_FOREACH(target, &bus->et_entries, links) {
 2026                         TAILQ_FOREACH(device, &target->ed_entries, links) {
 2027                                 SLIST_FOREACH(periph, &device->periphs,
 2028                                     periph_links) {
 2029                                         if (periph->refcount > 0) {
 2030                                                 mtx_unlock(&xsoftc.xpt_topo_lock);
 2031                                                 return (1);
 2032                                         }
 2033                                 }
 2034                         }
 2035                 }
 2036         }
 2037 
 2038         mtx_unlock(&xsoftc.xpt_topo_lock);
 2039         return (0);
 2040 }
 2041 
 2042 static int
 2043 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2044                   xpt_targetfunc_t *tr_func, void *arg)
 2045 {
 2046         struct cam_et *target, *next_target;
 2047         int retval;
 2048 
 2049         retval = 1;
 2050         for (target = (start_target ? start_target :
 2051                        TAILQ_FIRST(&bus->et_entries));
 2052              target != NULL; target = next_target) {
 2053 
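                       /*
                        * Hold a reference so the current target does not go
                        * away while we call the traversal function.
                        */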
 2054                 target->refcount++;
 2055 
 2056                 retval = tr_func(target, arg);
 2057 
 2058                 next_target = TAILQ_NEXT(target, links);
 2059 
 2060                 xpt_release_target(target);
 2061 
 2062                 if (retval == 0)
 2063                         return(retval);
 2064         }
 2065 
 2066         return(retval);
 2067 }
 2068 
 2069 static int
 2070 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2071                   xpt_devicefunc_t *tr_func, void *arg)
 2072 {
 2073         struct cam_ed *device, *next_device;
 2074         int retval;
 2075 
 2076         retval = 1;
 2077         for (device = (start_device ? start_device :
 2078                        TAILQ_FIRST(&target->ed_entries));
 2079              device != NULL;
 2080              device = next_device) {
 2081 
 2082                 /*
 2083                  * Hold a reference so the current device does not go away
 2084                  * on us.
 2085                  */
 2086                 device->refcount++;
 2087 
 2088                 retval = tr_func(device, arg);
 2089 
 2090                 /*
 2091                  * Grab our next pointer before we release the current
 2092                  * device.
 2093                  */
 2094                 next_device = TAILQ_NEXT(device, links);
 2095 
 2096                 xpt_release_device(device);
 2097 
 2098                 if (retval == 0)
 2099                         return(retval);
 2100         }
 2101 
 2102         return(retval);
 2103 }
 2104 
 2105 static int
 2106 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2107                   xpt_periphfunc_t *tr_func, void *arg)
 2108 {
 2109         struct cam_periph *periph, *next_periph;
 2110         int retval;
 2111 
 2112         retval = 1;
 2113 
 2114         xpt_lock_buses();
 2115         for (periph = (start_periph ? start_periph :
 2116                        SLIST_FIRST(&device->periphs));
 2117              periph != NULL;
 2118              periph = next_periph) {
 2119 
 2121                 /*
 2122                  * In this case, we want to show peripherals that have been
 2123                  * invalidated, but not peripherals that are scheduled to
 2124                  * be freed.  So instead of calling cam_periph_acquire(),
 2125                  * which will fail if the periph has been invalidated, we
 2126                  * just check for the free flag here.  If it is free, we
 2127                  * skip to the next periph.
 2128                  */
 2129                 if (periph->flags & CAM_PERIPH_FREE) {
 2130                         next_periph = SLIST_NEXT(periph, periph_links);
 2131                         continue;
 2132                 }
 2133 
 2134                 /*
 2135                  * Acquire a reference to this periph while we call the
 2136                  * traversal function, so it can't go away.
 2137                  */
 2138                 periph->refcount++;
 2139 
 2140                 xpt_unlock_buses();
 2141 
 2142                 retval = tr_func(periph, arg);
 2143 
 2144                 /*
 2145                  * We need the lock for list traversal.
 2146                  */
 2147                 xpt_lock_buses();
 2148 
 2149                 /*
 2150                  * Grab the next peripheral before we release this one, so
 2151                  * our next pointer is still valid.
 2152                  */
 2153                 next_periph = SLIST_NEXT(periph, periph_links);
 2154 
 2155                 cam_periph_release_locked_buses(periph);
 2156 
 2157                 if (retval == 0)
 2158                         goto bailout_done;
 2159         }
 2160 
 2161 bailout_done:
 2162 
 2163         xpt_unlock_buses();
 2164 
 2165         return(retval);
 2166 }
 2167 
 2168 static int
 2169 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2170                 xpt_pdrvfunc_t *tr_func, void *arg)
 2171 {
 2172         struct periph_driver **pdrv;
 2173         int retval;
 2174 
 2175         retval = 1;
 2176 
 2177         /*
 2178          * We don't traverse the peripheral driver list like we do the
 2179          * other lists, because it is a linker set, and therefore cannot be
 2180          * changed during runtime.  If the peripheral driver list is ever
 2181          * re-done to be something other than a linker set (i.e. it can
 2182          * change while the system is running), the list traversal should
 2183          * be modified to work like the other traversal functions.
 2184          */
 2185         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2186              *pdrv != NULL; pdrv++) {
 2187                 retval = tr_func(pdrv, arg);
 2188 
 2189                 if (retval == 0)
 2190                         return(retval);
 2191         }
 2192 
 2193         return(retval);
 2194 }
 2195 
 2196 static int
 2197 xptpdperiphtraverse(struct periph_driver **pdrv,
 2198                     struct cam_periph *start_periph,
 2199                     xpt_periphfunc_t *tr_func, void *arg)
 2200 {
 2201         struct cam_periph *periph, *next_periph;
 2202         int retval;
 2203 
 2204         retval = 1;
 2205 
 2206         xpt_lock_buses();
 2207         for (periph = (start_periph ? start_periph :
 2208              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2209              periph = next_periph) {
 2210 
 2212                 /*
 2213                  * In this case, we want to show peripherals that have been
 2214                  * invalidated, but not peripherals that are scheduled to
 2215                  * be freed.  So instead of calling cam_periph_acquire(),
 2216                  * which will fail if the periph has been invalidated, we
 2217                  * just check for the free flag here.  If it is free, we
 2218                  * skip to the next periph.
 2219                  */
 2220                 if (periph->flags & CAM_PERIPH_FREE) {
 2221                         next_periph = TAILQ_NEXT(periph, unit_links);
 2222                         continue;
 2223                 }
 2224 
 2225                 /*
 2226                  * Acquire a reference to this periph while we call the
 2227                  * traversal function, so it can't go away.
 2228                  */
 2229                 periph->refcount++;
 2230 
 2231                 /*
  2232                  * XXX KDM we have the topology lock here, but in
 2233                  * xptperiphtraverse(), we drop it before calling the
 2234                  * traversal function.  Which is correct?
 2235                  */
 2236                 retval = tr_func(periph, arg);
 2237 
 2238                 /*
 2239                  * Grab the next peripheral before we release this one, so
 2240                  * our next pointer is still valid.
 2241                  */
 2242                 next_periph = TAILQ_NEXT(periph, unit_links);
 2243 
 2244                 cam_periph_release_locked_buses(periph);
 2245 
 2246                 if (retval == 0)
 2247                         goto bailout_done;
 2248         }
 2249 bailout_done:
 2250 
 2251         xpt_unlock_buses();
 2252 
 2253         return(retval);
 2254 }
 2255 
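/*
 * The xptdef*func() callbacks below implement the depth-limited
 * traversals used by xpt_for_all_busses() and xpt_for_all_devices():
 * at each level of the EDT they either invoke the user's function, if
 * the depth in the traversal configuration has been reached, or recurse
 * one level further down the tree.
 */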
 2256 static int
 2257 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2258 {
 2259         struct xpt_traverse_config *tr_config;
 2260 
 2261         tr_config = (struct xpt_traverse_config *)arg;
 2262 
 2263         if (tr_config->depth == XPT_DEPTH_BUS) {
 2264                 xpt_busfunc_t *tr_func;
 2265 
 2266                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2267 
 2268                 return(tr_func(bus, tr_config->tr_arg));
 2269         } else
 2270                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2271 }
 2272 
 2273 static int
 2274 xptdeftargetfunc(struct cam_et *target, void *arg)
 2275 {
 2276         struct xpt_traverse_config *tr_config;
 2277 
 2278         tr_config = (struct xpt_traverse_config *)arg;
 2279 
 2280         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2281                 xpt_targetfunc_t *tr_func;
 2282 
 2283                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2284 
 2285                 return(tr_func(target, tr_config->tr_arg));
 2286         } else
 2287                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2288 }
 2289 
 2290 static int
 2291 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2292 {
 2293         struct xpt_traverse_config *tr_config;
 2294 
 2295         tr_config = (struct xpt_traverse_config *)arg;
 2296 
 2297         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2298                 xpt_devicefunc_t *tr_func;
 2299 
 2300                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2301 
 2302                 return(tr_func(device, tr_config->tr_arg));
 2303         } else
 2304                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2305 }
 2306 
 2307 static int
 2308 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2309 {
 2310         struct xpt_traverse_config *tr_config;
 2311         xpt_periphfunc_t *tr_func;
 2312 
 2313         tr_config = (struct xpt_traverse_config *)arg;
 2314 
 2315         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2316 
 2317         /*
 2318          * Unlike the other default functions, we don't check for depth
 2319          * here.  The peripheral driver level is the last level in the EDT,
 2320          * so if we're here, we should execute the function in question.
 2321          */
 2322         return(tr_func(periph, tr_config->tr_arg));
 2323 }
 2324 
 2325 /*
 2326  * Execute the given function for every bus in the EDT.
 2327  */
 2328 static int
 2329 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2330 {
 2331         struct xpt_traverse_config tr_config;
 2332 
 2333         tr_config.depth = XPT_DEPTH_BUS;
 2334         tr_config.tr_func = tr_func;
 2335         tr_config.tr_arg = arg;
 2336 
 2337         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2338 }
 2339 
 2340 /*
 2341  * Execute the given function for every device in the EDT.
 2342  */
 2343 static int
 2344 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2345 {
 2346         struct xpt_traverse_config tr_config;
 2347 
 2348         tr_config.depth = XPT_DEPTH_DEVICE;
 2349         tr_config.tr_func = tr_func;
 2350         tr_config.tr_arg = arg;
 2351 
 2352         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2353 }
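
/*
 * The xptsetasync*func() helpers below are used when an async callback
 * is registered: existing devices and busses are replayed to the new
 * subscriber as AC_FOUND_DEVICE and AC_PATH_REGISTERED events.
 */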
 2354 
 2355 static int
 2356 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2357 {
 2358         struct cam_path path;
 2359         struct ccb_getdev cgd;
 2360         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2361 
 2362         /*
 2363          * Don't report unconfigured devices (Wildcard devs,
 2364          * devices only for target mode, device instances
 2365          * that have been invalidated but are waiting for
 2366          * their last reference count to be released).
 2367          */
 2368         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2369                 return (1);
 2370 
 2371         xpt_compile_path(&path,
 2372                          NULL,
 2373                          device->target->bus->path_id,
 2374                          device->target->target_id,
 2375                          device->lun_id);
 2376         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2377         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2378         xpt_action((union ccb *)&cgd);
 2379         csa->callback(csa->callback_arg,
 2380                             AC_FOUND_DEVICE,
 2381                             &path, &cgd);
 2382         xpt_release_path(&path);
 2383 
 2384         return(1);
 2385 }
 2386 
 2387 static int
 2388 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2389 {
 2390         struct cam_path path;
 2391         struct ccb_pathinq cpi;
 2392         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2393 
 2394         xpt_compile_path(&path, /*periph*/NULL,
 2395                          bus->sim->path_id,
 2396                          CAM_TARGET_WILDCARD,
 2397                          CAM_LUN_WILDCARD);
 2398         xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2399         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2400         xpt_action((union ccb *)&cpi);
 2401         csa->callback(csa->callback_arg,
 2402                             AC_PATH_REGISTERED,
 2403                             &path, &cpi);
 2404         xpt_release_path(&path);
 2405 
 2406         return(1);
 2407 }
 2408 
 2409 void
 2410 xpt_action(union ccb *start_ccb)
 2411 {
 2412 
 2413         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2414 
 2415         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2416         (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
 2417 }
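
/*
 * xpt_action() hands each CCB to the action routine of the bus's
 * transport; the transport-specific code (e.g. the SCSI or ATA XPT)
 * handles what it must and falls back to xpt_action_default() below for
 * the generic cases.
 */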
 2418 
 2419 void
 2420 xpt_action_default(union ccb *start_ccb)
 2421 {
 2422         char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 2423         struct cam_path *path;
 2424 
 2425         path = start_ccb->ccb_h.path;
 2426         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
 2427 
 2428         switch (start_ccb->ccb_h.func_code) {
 2429         case XPT_SCSI_IO:
 2430         {
 2431                 struct cam_ed *device;
 2432 
 2433                 /*
 2434                  * For the sake of compatibility with SCSI-1
 2435                  * devices that may not understand the identify
 2436                  * message, we include lun information in the
 2437                  * second byte of all commands.  SCSI-1 specifies
  2438                  * that luns are a 3-bit value and reserves only 3
  2439                  * bits for lun information in the CDB.  Later
  2440                  * revisions of the SCSI spec allow for more than 8
  2441                  * luns, but have deprecated lun information in the
  2442                  * CDB.  So, if the lun won't fit, we must omit it.
 2443                  *
 2444                  * Also be aware that during initial probing for devices,
 2445                  * the inquiry information is unknown but initialized to 0.
 2446                  * This means that this code will be exercised while probing
 2447                  * devices with an ANSI revision greater than 2.
 2448                  */
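                       /*
                        * For example, lun 2 on a SCSI-1 target becomes 010b
                        * in bits 7-5 of CDB byte 1 via the shift below.
                        */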
 2449                 device = path->device;
 2450                 if (device->protocol_version <= SCSI_REV_2
 2451                  && start_ccb->ccb_h.target_lun < 8
 2452                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 2453 
 2454                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 2455                             start_ccb->ccb_h.target_lun << 5;
 2456                 }
 2457                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 2458                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
 2459                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
 2460                                        &path->device->inq_data),
 2461                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
 2462                                           cdb_str, sizeof(cdb_str))));
 2463         }
 2464         /* FALLTHROUGH */
 2465         case XPT_TARGET_IO:
 2466         case XPT_CONT_TARGET_IO:
 2467                 start_ccb->csio.sense_resid = 0;
 2468                 start_ccb->csio.resid = 0;
 2469                 /* FALLTHROUGH */
 2470         case XPT_ATA_IO:
 2471                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) {
 2472                         start_ccb->ataio.resid = 0;
 2473                         CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. ACB: %s\n",
 2474                             ata_op_string(&start_ccb->ataio.cmd),
 2475                             ata_cmd_string(&start_ccb->ataio.cmd,
 2476                                           cdb_str, sizeof(cdb_str))));
 2477                 }
 2478                 /* FALLTHROUGH */
 2479         case XPT_RESET_DEV:
 2480         case XPT_ENG_EXEC:
 2481         {
 2482                 int frozen;
 2483 
 2484                 frozen = cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 2485                 path->device->sim->devq->alloc_openings += frozen;
 2486                 if (frozen > 0)
 2487                         xpt_run_dev_allocq(path->bus);
 2488                 if (xpt_schedule_dev_sendq(path->bus, path->device))
 2489                         xpt_run_dev_sendq(path->bus);
 2490                 break;
 2491         }
 2492         case XPT_CALC_GEOMETRY:
 2493         {
 2494                 struct cam_sim *sim;
 2495 
 2496                 /* Filter out garbage */
 2497                 if (start_ccb->ccg.block_size == 0
 2498                  || start_ccb->ccg.volume_size == 0) {
 2499                         start_ccb->ccg.cylinders = 0;
 2500                         start_ccb->ccg.heads = 0;
 2501                         start_ccb->ccg.secs_per_track = 0;
 2502                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2503                         break;
 2504                 }
 2505 #if defined(PC98) || defined(__sparc64__)
 2506                 /*
  2507                  * In a PC-98 system, geometry translation depends on
 2508                  * the "real" device geometry obtained from mode page 4.
 2509                  * SCSI geometry translation is performed in the
 2510                  * initialization routine of the SCSI BIOS and the result
 2511                  * stored in host memory.  If the translation is available
 2512                  * in host memory, use it.  If not, rely on the default
 2513                  * translation the device driver performs.
  2514                  * For sparc64, we may need to adjust the geometry of large
 2515                  * disks in order to fit the limitations of the 16-bit
 2516                  * fields of the VTOC8 disk label.
 2517                  */
 2518                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 2519                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2520                         break;
 2521                 }
 2522 #endif
 2523                 sim = path->bus->sim;
 2524                 (*(sim->sim_action))(sim, start_ccb);
 2525                 break;
 2526         }
 2527         case XPT_ABORT:
 2528         {
 2529                 union ccb* abort_ccb;
 2530 
 2531                 abort_ccb = start_ccb->cab.abort_ccb;
 2532                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 2533 
 2534                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 2535                                 struct cam_ccbq *ccbq;
 2536                                 struct cam_ed *device;
 2537 
 2538                                 device = abort_ccb->ccb_h.path->device;
 2539                                 ccbq = &device->ccbq;
 2540                                 device->sim->devq->alloc_openings -= 
 2541                                     cam_ccbq_remove_ccb(ccbq, abort_ccb);
 2542                                 abort_ccb->ccb_h.status =
 2543                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2544                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2545                                 xpt_done(abort_ccb);
 2546                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2547                                 break;
 2548                         }
 2549                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 2550                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 2551                                 /*
 2552                                  * We've caught this ccb en route to
 2553                                  * the SIM.  Flag it for abort and the
 2554                                  * SIM will do so just before starting
 2555                                  * real work on the CCB.
 2556                                  */
 2557                                 abort_ccb->ccb_h.status =
 2558                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2559                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2560                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2561                                 break;
 2562                         }
 2563                 }
 2564                 if (XPT_FC_IS_QUEUED(abort_ccb)
 2565                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 2566                         /*
 2567                          * It's already completed but waiting
 2568                          * for our SWI to get to it.
 2569                          */
 2570                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 2571                         break;
 2572                 }
 2573                 /*
 2574                  * If we weren't able to take care of the abort request
 2575                  * in the XPT, pass the request down to the SIM for processing.
 2576                  */
 2577         }
 2578         /* FALLTHROUGH */
 2579         case XPT_ACCEPT_TARGET_IO:
 2580         case XPT_EN_LUN:
 2581         case XPT_IMMED_NOTIFY:
 2582         case XPT_NOTIFY_ACK:
 2583         case XPT_RESET_BUS:
 2584         case XPT_IMMEDIATE_NOTIFY:
 2585         case XPT_NOTIFY_ACKNOWLEDGE:
 2586         case XPT_GET_SIM_KNOB:
 2587         case XPT_SET_SIM_KNOB:
 2588         {
 2589                 struct cam_sim *sim;
 2590 
 2591                 sim = path->bus->sim;
 2592                 (*(sim->sim_action))(sim, start_ccb);
 2593                 break;
 2594         }
 2595         case XPT_PATH_INQ:
 2596         {
 2597                 struct cam_sim *sim;
 2598 
 2599                 sim = path->bus->sim;
 2600                 (*(sim->sim_action))(sim, start_ccb);
 2601                 break;
 2602         }
 2603         case XPT_PATH_STATS:
 2604                 start_ccb->cpis.last_reset = path->bus->last_reset;
 2605                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2606                 break;
 2607         case XPT_GDEV_TYPE:
 2608         {
 2609                 struct cam_ed *dev;
 2610 
 2611                 dev = path->device;
 2612                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2613                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2614                 } else {
 2615                         struct ccb_getdev *cgd;
 2616 
 2617                         cgd = &start_ccb->cgd;
 2618                         cgd->protocol = dev->protocol;
 2619                         cgd->inq_data = dev->inq_data;
 2620                         cgd->ident_data = dev->ident_data;
 2621                         cgd->inq_flags = dev->inq_flags;
 2622                         cgd->ccb_h.status = CAM_REQ_CMP;
 2623                         cgd->serial_num_len = dev->serial_num_len;
 2624                         if ((dev->serial_num_len > 0)
 2625                          && (dev->serial_num != NULL))
 2626                                 bcopy(dev->serial_num, cgd->serial_num,
 2627                                       dev->serial_num_len);
 2628                 }
 2629                 break;
 2630         }
 2631         case XPT_GDEV_STATS:
 2632         {
 2633                 struct cam_ed *dev;
 2634 
 2635                 dev = path->device;
 2636                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2637                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2638                 } else {
 2639                         struct ccb_getdevstats *cgds;
 2640                         struct cam_eb *bus;
 2641                         struct cam_et *tar;
 2642 
 2643                         cgds = &start_ccb->cgds;
 2644                         bus = path->bus;
 2645                         tar = path->target;
 2646                         cgds->dev_openings = dev->ccbq.dev_openings;
 2647                         cgds->dev_active = dev->ccbq.dev_active;
 2648                         cgds->devq_openings = dev->ccbq.devq_openings;
 2649                         cgds->devq_queued = dev->ccbq.queue.entries;
 2650                         cgds->held = dev->ccbq.held;
 2651                         cgds->last_reset = tar->last_reset;
 2652                         cgds->maxtags = dev->maxtags;
 2653                         cgds->mintags = dev->mintags;
 2654                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 2655                                 cgds->last_reset = bus->last_reset;
 2656                         cgds->ccb_h.status = CAM_REQ_CMP;
 2657                 }
 2658                 break;
 2659         }
 2660         case XPT_GDEVLIST:
 2661         {
 2662                 struct cam_periph       *nperiph;
 2663                 struct periph_list      *periph_head;
 2664                 struct ccb_getdevlist   *cgdl;
 2665                 u_int                   i;
 2666                 struct cam_ed           *device;
 2667                 int                     found;
 2668 
 2669 
 2670                 found = 0;
 2671 
 2672                 /*
 2673                  * Don't want anyone mucking with our data.
 2674                  */
 2675                 device = path->device;
 2676                 periph_head = &device->periphs;
 2677                 cgdl = &start_ccb->cgdl;
 2678 
 2679                 /*
 2680                  * Check whether the list has changed since the user
 2681                  * last requested a list member.  If so, report that the
 2682                  * list has changed so that they know to start over
 2683                  * from the beginning.
 2684                  */
 2685                 if ((cgdl->index != 0) &&
 2686                     (cgdl->generation != device->generation)) {
 2687                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 2688                         break;
 2689                 }
 2690 
 2691                 /*
 2692                  * Traverse the list of peripherals and attempt to find
 2693                  * the requested peripheral.
 2694                  */
 2695                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 2696                      (nperiph != NULL) && (i <= cgdl->index);
 2697                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 2698                         if (i == cgdl->index) {
 2699                                 strncpy(cgdl->periph_name,
 2700                                         nperiph->periph_name,
 2701                                         DEV_IDLEN);
 2702                                 cgdl->unit_number = nperiph->unit_number;
 2703                                 found = 1;
 2704                         }
 2705                 }
 2706                 if (found == 0) {
 2707                         cgdl->status = CAM_GDEVLIST_ERROR;
 2708                         break;
 2709                 }
 2710 
 2711                 if (nperiph == NULL)
 2712                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 2713                 else
 2714                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 2715 
 2716                 cgdl->index++;
 2717                 cgdl->generation = device->generation;
 2718 
 2719                 cgdl->ccb_h.status = CAM_REQ_CMP;
 2720                 break;
 2721         }
 2722         case XPT_DEV_MATCH:
 2723         {
 2724                 dev_pos_type position_type;
 2725                 struct ccb_dev_match *cdm;
 2726 
 2727                 cdm = &start_ccb->cdm;
 2728 
 2729                 /*
 2730                  * There are two ways of getting at information in the EDT.
 2731                  * The first way is via the primary EDT tree.  It starts
 2732                  * with a list of busses, then a list of targets on a bus,
 2733                  * then devices/luns on a target, and then peripherals on a
 2734                  * device/lun.  The "other" way is by the peripheral driver
 2735                  * lists.  The peripheral driver lists are organized by
 2736                  * peripheral driver, so it makes sense to
 2737                  * use the peripheral driver list if the user is looking
 2738                  * for something like "da1", or all "da" devices.  If the
 2739                  * user is looking for something on a particular bus/target
 2740                  * or lun, it's generally better to go through the EDT tree.
 2741                  */
 2742 
 2743                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 2744                         position_type = cdm->pos.position_type;
 2745                 else {
 2746                         u_int i;
 2747 
 2748                         position_type = CAM_DEV_POS_NONE;
 2749 
 2750                         for (i = 0; i < cdm->num_patterns; i++) {
 2751                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 2752                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 2753                                         position_type = CAM_DEV_POS_EDT;
 2754                                         break;
 2755                                 }
 2756                         }
 2757 
 2758                         if (cdm->num_patterns == 0)
 2759                                 position_type = CAM_DEV_POS_EDT;
 2760                         else if (position_type == CAM_DEV_POS_NONE)
 2761                                 position_type = CAM_DEV_POS_PDRV;
 2762                 }
 2763 
 2764                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
 2765                 case CAM_DEV_POS_EDT:
 2766                         xptedtmatch(cdm);
 2767                         break;
 2768                 case CAM_DEV_POS_PDRV:
 2769                         xptperiphlistmatch(cdm);
 2770                         break;
 2771                 default:
 2772                         cdm->status = CAM_DEV_MATCH_ERROR;
 2773                         break;
 2774                 }
 2775 
 2776                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 2777                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2778                 else
 2779                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2780 
 2781                 break;
 2782         }
 2783         case XPT_SASYNC_CB:
 2784         {
 2785                 struct ccb_setasync *csa;
 2786                 struct async_node *cur_entry;
 2787                 struct async_list *async_head;
 2788                 u_int32_t added;
 2789 
 2790                 csa = &start_ccb->csa;
 2791                 added = csa->event_enable;
 2792                 async_head = &path->device->asyncs;
 2793 
 2794                 /*
 2795                  * If there is already an entry for us, simply
 2796                  * update it.
 2797                  */
 2798                 cur_entry = SLIST_FIRST(async_head);
 2799                 while (cur_entry != NULL) {
 2800                         if ((cur_entry->callback_arg == csa->callback_arg)
 2801                          && (cur_entry->callback == csa->callback))
 2802                                 break;
 2803                         cur_entry = SLIST_NEXT(cur_entry, links);
 2804                 }
 2805 
 2806                 if (cur_entry != NULL) {
 2807                         /*
 2808                          * If the request has no flags set,
 2809                          * remove the entry.
 2810                          */
 2811                         added &= ~cur_entry->event_enable;
 2812                         if (csa->event_enable == 0) {
 2813                                 SLIST_REMOVE(async_head, cur_entry,
 2814                                              async_node, links);
 2815                                 xpt_release_device(path->device);
 2816                                 free(cur_entry, M_CAMXPT);
 2817                         } else {
 2818                                 cur_entry->event_enable = csa->event_enable;
 2819                         }
 2820                         csa->event_enable = added;
 2821                 } else {
 2822                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 2823                                            M_NOWAIT);
 2824                         if (cur_entry == NULL) {
 2825                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 2826                                 break;
 2827                         }
 2828                         cur_entry->event_enable = csa->event_enable;
 2829                         cur_entry->callback_arg = csa->callback_arg;
 2830                         cur_entry->callback = csa->callback;
 2831                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 2832                         xpt_acquire_device(path->device);
 2833                 }
 2834                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2835                 break;
 2836         }
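                /*
                 * [Editor's example, not in the original source.]  A
                 * hedged sketch of a client registering for async events
                 * via XPT_SASYNC_CB; "mydriver_async" and "softc" are
                 * hypothetical names, and the caller is assumed to hold
                 * the SIM lock for a valid path:
                 *
                 *      struct ccb_setasync csa;
                 *
                 *      xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
                 *      csa.ccb_h.func_code = XPT_SASYNC_CB;
                 *      csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
                 *      csa.callback = mydriver_async;
                 *      csa.callback_arg = softc;
                 *      xpt_action((union ccb *)&csa);
                 *
                 * Re-issuing the CCB with event_enable == 0 and the same
                 * callback/callback_arg pair removes the registration, as
                 * the case above implements.
                 */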
 2837         case XPT_REL_SIMQ:
 2838         {
 2839                 struct ccb_relsim *crs;
 2840                 struct cam_ed *dev;
 2841 
 2842                 crs = &start_ccb->crs;
 2843                 dev = path->device;
 2844                 if (dev == NULL) {
 2845 
 2846                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 2847                         break;
 2848                 }
 2849 
 2850                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 2851 
 2852                         /* Don't ever go below one opening */
 2853                         if (crs->openings > 0) {
 2854                                 xpt_dev_ccbq_resize(path, crs->openings);
 2855                                 if (bootverbose) {
 2856                                         xpt_print(path,
 2857                                             "number of openings is now %d\n",
 2858                                             crs->openings);
 2859                                 }
 2860                         }
 2861                 }
 2862 
 2863                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 2864 
 2865                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 2866 
 2867                                 /*
 2868                                  * Just extend the old timeout and decrement
 2869                                  * the freeze count so that a single timeout
 2870                                  * is sufficient for releasing the queue.
 2871                                  */
 2872                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2873                                 callout_stop(&dev->callout);
 2874                         } else {
 2875 
 2876                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2877                         }
 2878 
 2879                         callout_reset(&dev->callout,
 2880                             (crs->release_timeout * hz) / 1000,
 2881                             xpt_release_devq_timeout, dev);
 2882 
 2883                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 2884 
 2885                 }
 2886 
 2887                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 2888 
 2889                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 2890                                 /*
 2891                                  * Decrement the freeze count so that a single
 2892                                  * completion is still sufficient to unfreeze
 2893                                  * the queue.
 2894                                  */
 2895                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2896                         } else {
 2897 
 2898                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 2899                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2900                         }
 2901                 }
 2902 
 2903                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 2904 
 2905                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 2906                          || (dev->ccbq.dev_active == 0)) {
 2907 
 2908                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2909                         } else {
 2910 
 2911                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 2912                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2913                         }
 2914                 }
 2915 
 2916                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 2917                         xpt_release_devq_rl(path, /*runlevel*/
 2918                             (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
 2919                                 crs->release_timeout : 0,
 2920                             /*count*/1, /*run_queue*/TRUE);
 2921                 }
 2922                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt[0];
 2923                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2924                 break;
 2925         }
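                /*
                 * [Editor's example, not in the original source.]  Sketch
                 * of a peripheral using XPT_REL_SIMQ to resize its opening
                 * count; the value 16 is purely illustrative and the SIM
                 * lock is assumed held:
                 *
                 *      struct ccb_relsim crs;
                 *
                 *      xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
                 *      crs.ccb_h.func_code = XPT_REL_SIMQ;
                 *      crs.release_flags = RELSIM_ADJUST_OPENINGS;
                 *      crs.openings = 16;
                 *      crs.release_timeout = 0;
                 *      xpt_action((union ccb *)&crs);
                 */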
 2926         case XPT_DEBUG: {
 2927                 /* Check that all request bits are supported. */
 2928                 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
 2929                         start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 2930                         break;
 2931                 }
 2932 
 2933                 cam_dflags = start_ccb->cdbg.flags;
 2934                 if (cam_dpath != NULL) {
 2935                         xpt_free_path(cam_dpath);
 2936                         cam_dpath = NULL;
 2937                 }
 2938                 if (cam_dflags != CAM_DEBUG_NONE) {
 2939                         if (xpt_create_path(&cam_dpath, xpt_periph,
 2940                                             start_ccb->ccb_h.path_id,
 2941                                             start_ccb->ccb_h.target_id,
 2942                                             start_ccb->ccb_h.target_lun) !=
 2943                                             CAM_REQ_CMP) {
 2944                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2945                                 cam_dflags = CAM_DEBUG_NONE;
 2946                         } else {
 2947                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2948                                 xpt_print(cam_dpath, "debugging flags now %x\n",
 2949                                     cam_dflags);
 2950                         }
 2951                 } else {
 2952                         cam_dpath = NULL;
 2953                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2954                 }
 2955                 break;
 2956         }
 2957         case XPT_FREEZE_QUEUE:
 2958         {
 2959                 struct ccb_relsim *crs = &start_ccb->crs;
 2960 
 2961                 xpt_freeze_devq_rl(path, /*runlevel*/
 2962                     (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
 2963                     crs->release_timeout : 0, /*count*/1);
 2964                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2965                 break;
 2966         }
 2967         case XPT_NOOP:
 2968                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 2969                         xpt_freeze_devq(path, 1);
 2970                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2971                 break;
 2972         default:
 2973         case XPT_SDEV_TYPE:
 2974         case XPT_TERM_IO:
 2975         case XPT_ENG_INQ:
 2976                 /* XXX Implement */
 2977                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 2978                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
 2979                         xpt_done(start_ccb);
 2980                 }
 2981                 break;
 2982         }
 2983 }
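/*
 * [Editor's example, not in the original source.]  Typical client-side
 * use of the dispatcher above: an XPT_GDEV_TYPE query, with the SIM
 * lock held and "path" already compiled:
 *
 *      struct ccb_getdev cgd;
 *
 *      xpt_setup_ccb(&cgd.ccb_h, path, CAM_PRIORITY_NORMAL);
 *      cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 *      xpt_action((union ccb *)&cgd);
 *      if (cgd.ccb_h.status == CAM_REQ_CMP)
 *              ...cgd.inq_data and cgd.protocol are now valid...
 */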
 2984 
 2985 void
 2986 xpt_polled_action(union ccb *start_ccb)
 2987 {
 2988         u_int32_t timeout;
 2989         struct    cam_sim *sim;
 2990         struct    cam_devq *devq;
 2991         struct    cam_ed *dev;
 2992 
 2993 
 2994         timeout = start_ccb->ccb_h.timeout * 10;
 2995         sim = start_ccb->ccb_h.path->bus->sim;
 2996         devq = sim->devq;
 2997         dev = start_ccb->ccb_h.path->device;
 2998 
 2999         mtx_assert(sim->mtx, MA_OWNED);
 3000 
 3001         /* Don't use ISR for this SIM while polling. */
 3002         sim->flags |= CAM_SIM_POLLED;
 3003 
 3004         /*
 3005          * Steal an opening so that no other queued requests
 3006          * can get it before us while we simulate interrupts.
 3007          */
 3008         dev->ccbq.devq_openings--;
 3009         dev->ccbq.dev_openings--;
 3010 
 3011         while(((devq != NULL && devq->send_openings <= 0) ||
 3012            dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
 3013                 DELAY(100);
 3014                 (*(sim->sim_poll))(sim);
 3015                 camisr_runqueue(&sim->sim_doneq);
 3016         }
 3017 
 3018         dev->ccbq.devq_openings++;
 3019         dev->ccbq.dev_openings++;
 3020 
 3021         if (timeout != 0) {
 3022                 xpt_action(start_ccb);
 3023                 while(--timeout > 0) {
 3024                         (*(sim->sim_poll))(sim);
 3025                         camisr_runqueue(&sim->sim_doneq);
 3026                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 3027                             != CAM_REQ_INPROG)
 3028                                 break;
 3029                         DELAY(100);
 3030                 }
 3031                 if (timeout == 0) {
 3032                         /*
 3033                          * XXX Is it worth adding a sim_timeout entry
 3034                          * point so we can attempt recovery?  If
 3035                          * this is only used for dumps, I don't think
 3036                          * it is.
 3037                          */
 3038                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3039                 }
 3040         } else {
 3041                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3042         }
 3043 
 3044         /* We will use CAM ISR for this SIM again. */
 3045         sim->flags &= ~CAM_SIM_POLLED;
 3046 }
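/*
 * [Editor's example, not in the original source.]  xpt_polled_action()
 * exists for contexts without interrupt service, e.g. crash dumps.  A
 * hedged sketch, assuming "csio" has already been filled in (say, by
 * scsi_read_write()) with a millisecond timeout:
 *
 *      xpt_polled_action((union ccb *)&csio);
 *      if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *              return (EIO);
 */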
 3047 
 3048 /*
 3049  * Schedule a peripheral driver to receive a ccb when its
 3050  * target device has space for more transactions.
 3051  */
 3052 void
 3053 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 3054 {
 3055         struct cam_ed *device;
 3056         int runq = 0;
 3057 
 3058         mtx_assert(perph->sim->mtx, MA_OWNED);
 3059 
 3060         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3061         device = perph->path->device;
 3062         if (periph_is_queued(perph)) {
 3063                 /* Simply reorder based on new priority */
 3064                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3065                           ("   change priority to %d\n", new_priority));
 3066                 if (new_priority < perph->pinfo.priority) {
 3067                         camq_change_priority(&device->drvq,
 3068                                              perph->pinfo.index,
 3069                                              new_priority);
 3070                         runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3071                 }
 3072         } else {
 3073                 /* New entry on the queue */
 3074                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3075                           ("   added periph to queue\n"));
 3076                 perph->pinfo.priority = new_priority;
 3077                 perph->pinfo.generation = ++device->drvq.generation;
 3078                 camq_insert(&device->drvq, &perph->pinfo);
 3079                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3080         }
 3081         if (runq != 0) {
 3082                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3083                           ("   calling xpt_run_dev_allocq\n"));
 3084                 xpt_run_dev_allocq(perph->path->bus);
 3085         }
 3086 }
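/*
 * [Editor's example, not in the original source.]  A peripheral never
 * runs the alloc queue itself; it requests scheduling and later gets a
 * CCB through its periph_start method.  All names below are
 * hypothetical:
 *
 *      static void
 *      mydriver_kick(struct mydriver_softc *softc)
 *      {
 *              ...enqueue the bio on a local list...
 *              xpt_schedule(softc->periph, CAM_PRIORITY_NORMAL);
 *      }
 *
 *      static void
 *      mydriver_start(struct cam_periph *periph, union ccb *start_ccb)
 *      {
 *              ...fill in start_ccb and hand it to xpt_action()...
 *      }
 */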
 3087 
 3088 
 3089 /*
 3090  * Schedule a device to run on a given queue.
 3091  * If the device was inserted as a new entry on the queue,
 3092  * return 1 meaning the device queue should be run. If we
 3093  * were already queued, implying someone else has already
 3094  * started the queue, return 0 so the caller doesn't attempt
 3095  * to run the queue.
 3096  */
 3097 int
 3098 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3099                  u_int32_t new_priority)
 3100 {
 3101         int retval;
 3102         u_int32_t old_priority;
 3103 
 3104         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3105 
 3106         old_priority = pinfo->priority;
 3107 
 3108         /*
 3109          * Are we already queued?
 3110          */
 3111         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3112                 /* Simply reorder based on new priority */
 3113                 if (new_priority < old_priority) {
 3114                         camq_change_priority(queue, pinfo->index,
 3115                                              new_priority);
 3116                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3117                                         ("changed priority to %d\n",
 3118                                          new_priority));
 3119                         retval = 1;
 3120                 } else
 3121                         retval = 0;
 3122         } else {
 3123                 /* New entry on the queue */
 3124                 if (new_priority < old_priority)
 3125                         pinfo->priority = new_priority;
 3126 
 3127                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3128                                 ("Inserting onto queue\n"));
 3129                 pinfo->generation = ++queue->generation;
 3130                 camq_insert(queue, pinfo);
 3131                 retval = 1;
 3132         }
 3133         return (retval);
 3134 }
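/*
 * [Editor's example, not in the original source.]  Hypothetical caller
 * honoring the return-value contract described above (the queue-entry
 * field names are illustrative):
 *
 *      if (xpt_schedule_dev(&devq->send_queue,
 *          &device->send_ccb_entry.pinfo, prio) != 0)
 *              xpt_run_dev_sendq(bus);
 */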
 3135 
 3136 static void
 3137 xpt_run_dev_allocq(struct cam_eb *bus)
 3138 {
 3139         struct  cam_devq *devq;
 3140 
 3141         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3142         devq = bus->sim->devq;
 3143 
 3144         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3145                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3146                          "openings == %d, active == %d\n",
 3147                          devq->alloc_queue.qfrozen_cnt[0],
 3148                          devq->alloc_queue.entries,
 3149                          devq->alloc_openings,
 3150                          devq->alloc_active));
 3151 
 3152         devq->alloc_queue.qfrozen_cnt[0]++;
 3153         while ((devq->alloc_queue.entries > 0)
 3154             && (devq->alloc_openings > 0)
 3155             && (devq->alloc_queue.qfrozen_cnt[0] <= 1)) {
 3156                 struct  cam_ed_qinfo *qinfo;
 3157                 struct  cam_ed *device;
 3158                 union   ccb *work_ccb;
 3159                 struct  cam_periph *drv;
 3160                 struct  camq *drvq;
 3161 
 3162                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3163                                                            CAMQ_HEAD);
 3164                 device = qinfo->device;
 3165                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3166                                 ("running device %p\n", device));
 3167 
 3168                 drvq = &device->drvq;
 3169                 KASSERT(drvq->entries > 0, ("xpt_run_dev_allocq: "
 3170                     "Device on queue without any work to do"));
 3171                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3172                         devq->alloc_openings--;
 3173                         devq->alloc_active++;
 3174                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3175                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3176                                       drv->pinfo.priority);
 3177                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3178                                         ("calling periph start\n"));
 3179                         drv->periph_start(drv, work_ccb);
 3180                 } else {
 3181                         /*
 3182                          * Malloc failure in alloc_ccb
 3183                          * Malloc failure in xpt_get_ccb().
 3184                         /*
 3185                          * XXX add us to a list to be run from free_ccb
 3186                          * if we don't have any ccbs active on this
 3187                          * device queue otherwise we may never get run
 3188                          * device queue; otherwise we may never get run
 3189                          */
 3190                         break;
 3191                 }
 3192 
 3193                 /* We may have more work. Attempt to reschedule. */
 3194                 xpt_schedule_dev_allocq(bus, device);
 3195         }
 3196         devq->alloc_queue.qfrozen_cnt[0]--;
 3197 }
 3198 
 3199 static void
 3200 xpt_run_dev_sendq(struct cam_eb *bus)
 3201 {
 3202         struct  cam_devq *devq;
 3203 
 3204         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3205 
 3206         devq = bus->sim->devq;
 3207 
 3208         devq->send_queue.qfrozen_cnt[0]++;
 3209         while ((devq->send_queue.entries > 0)
 3210             && (devq->send_openings > 0)
 3211             && (devq->send_queue.qfrozen_cnt[0] <= 1)) {
 3212                 struct  cam_ed_qinfo *qinfo;
 3213                 struct  cam_ed *device;
 3214                 union ccb *work_ccb;
 3215                 struct  cam_sim *sim;
 3216 
 3217                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3218                                                            CAMQ_HEAD);
 3219                 device = qinfo->device;
 3220                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3221                                 ("running device %p\n", device));
 3222 
 3223                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3224                 if (work_ccb == NULL) {
 3225                         printf("device on run queue with no ccbs???\n");
 3226                         continue;
 3227                 }
 3228 
 3229                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3230 
 3231                         mtx_lock(&xsoftc.xpt_lock);
 3232                         if (xsoftc.num_highpower <= 0) {
 3233                                 /*
 3234                                  * We got a high power command, but we
 3235                                  * don't have any available slots.  Freeze
 3236                                  * the device queue until we have a slot
 3237                                  * available.
 3238                                  */
 3239                                 xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3240                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
 3241                                                    &work_ccb->ccb_h,
 3242                                                    xpt_links.stqe);
 3243 
 3244                                 mtx_unlock(&xsoftc.xpt_lock);
 3245                                 continue;
 3246                         } else {
 3247                                 /*
 3248                                  * Consume a high power slot while
 3249                                  * this ccb runs.
 3250                                  */
 3251                                 xsoftc.num_highpower--;
 3252                         }
 3253                         mtx_unlock(&xsoftc.xpt_lock);
 3254                 }
 3255                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3256                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3257 
 3258                 devq->send_openings--;
 3259                 devq->send_active++;
 3260 
 3261                 xpt_schedule_dev_sendq(bus, device);
 3262 
 3263                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
 3264                         /*
 3265                          * The client wants to freeze the queue
 3266                          * after this CCB is sent.
 3267                          */
 3268                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3269                 }
 3270 
 3271                 /* In Target mode, the peripheral driver knows best... */
 3272                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3273                         if ((device->inq_flags & SID_CmdQue) != 0
 3274                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3275                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3276                         else
 3277                                 /*
 3278                                  * Clear this in case of a retried CCB that
 3279                                  * failed due to a rejected tag.
 3280                                  */
 3281                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3282                 }
 3283 
 3284                 /*
 3285                  * Device queues can be shared among multiple sim instances
 3286                  * that reside on different busses.  Use the SIM in the queue
 3287                  * CCB's path, rather than the one in the bus that was passed
 3288                  * into this function.
 3289                  */
 3290                 sim = work_ccb->ccb_h.path->bus->sim;
 3291                 (*(sim->sim_action))(sim, work_ccb);
 3292         }
 3293         devq->send_queue.qfrozen_cnt[0]--;
 3294 }
 3295 
 3296 /*
 3297  * Merge the request fields from the slave ccb into the master ccb, while
 3298  * keeping important fields in the master ccb constant.
 3299  */
 3300 void
 3301 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3302 {
 3303 
 3304         /*
 3305          * Pull fields that are valid for peripheral drivers to set
 3306          * into the master CCB along with the CCB "payload".
 3307          */
 3308         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3309         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3310         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3311         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 3312         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3313               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3314 }
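/*
 * [Editor's example, not in the original source.]  Sketch of a periph
 * that staged a request in a stack-local ccb and merges it into the
 * XPT-allocated one handed to its start routine, so the master's path
 * and queue bookkeeping survive:
 *
 *      union ccb local_ccb;
 *
 *      ...fill local_ccb.csio using the scsi_*() builders...
 *      xpt_merge_ccb(start_ccb, &local_ccb);
 *      xpt_action(start_ccb);
 */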
 3315 
 3316 void
 3317 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3318 {
 3319 
 3320         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3321         ccb_h->pinfo.priority = priority;
 3322         ccb_h->path = path;
 3323         ccb_h->path_id = path->bus->path_id;
 3324         if (path->target)
 3325                 ccb_h->target_id = path->target->target_id;
 3326         else
 3327                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3328         if (path->device) {
 3329                 ccb_h->target_lun = path->device->lun_id;
 3330                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3331         } else {
 3332                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3333         }
 3334         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3335         ccb_h->flags = 0;
 3336 }
 3337 
 3338 /* Path manipulation functions */
 3339 cam_status
 3340 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3341                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3342 {
 3343         struct     cam_path *path;
 3344         cam_status status;
 3345 
 3346         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 3347 
 3348         if (path == NULL) {
 3349                 status = CAM_RESRC_UNAVAIL;
 3350                 return(status);
 3351         }
 3352         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3353         if (status != CAM_REQ_CMP) {
 3354                 free(path, M_CAMXPT);
 3355                 path = NULL;
 3356         }
 3357         *new_path_ptr = path;
 3358         return (status);
 3359 }
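/*
 * [Editor's example, not in the original source.]  The usual
 * create/use/free pattern, with the SIM lock held (use
 * xpt_create_path_unlocked() when it is not):
 *
 *      struct cam_path *path;
 *
 *      if (xpt_create_path(&path, NULL, path_id, target_id,
 *          lun_id) != CAM_REQ_CMP)
 *              return (ENOMEM);
 *      xpt_async(AC_LOST_DEVICE, path, NULL);
 *      xpt_free_path(path);
 */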
 3360 
 3361 cam_status
 3362 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
 3363                          struct cam_periph *periph, path_id_t path_id,
 3364                          target_id_t target_id, lun_id_t lun_id)
 3365 {
 3366         struct     cam_path *path;
 3367         struct     cam_eb *bus = NULL;
 3368         cam_status status;
 3369         int        need_unlock = 0;
 3370 
 3371         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK);
 3372 
 3373         if (path_id != CAM_BUS_WILDCARD) {
 3374                 bus = xpt_find_bus(path_id);
 3375                 if (bus != NULL) {
 3376                         need_unlock = 1;
 3377                         CAM_SIM_LOCK(bus->sim);
 3378                 }
 3379         }
 3380         status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
 3381         if (need_unlock) {
 3382                 CAM_SIM_UNLOCK(bus->sim);
 3383                 xpt_release_bus(bus);
 3384         }
 3385         if (status != CAM_REQ_CMP) {
 3386                 free(path, M_CAMXPT);
 3387                 path = NULL;
 3388         }
 3389         *new_path_ptr = path;
 3390         return (status);
 3391 }
 3392 
 3393 cam_status
 3394 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 3395                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3396 {
 3397         struct       cam_eb *bus;
 3398         struct       cam_et *target;
 3399         struct       cam_ed *device;
 3400         cam_status   status;
 3401 
 3402         status = CAM_REQ_CMP;   /* Completed without error */
 3403         target = NULL;          /* Wildcarded */
 3404         device = NULL;          /* Wildcarded */
 3405 
 3406         /*
 3407          * We will potentially modify the EDT, so block interrupts
 3408          * that may attempt to create cam paths.
 3409          */
 3410         bus = xpt_find_bus(path_id);
 3411         if (bus == NULL) {
 3412                 status = CAM_PATH_INVALID;
 3413         } else {
 3414                 target = xpt_find_target(bus, target_id);
 3415                 if (target == NULL) {
 3416                         /* Create one */
 3417                         struct cam_et *new_target;
 3418 
 3419                         new_target = xpt_alloc_target(bus, target_id);
 3420                         if (new_target == NULL) {
 3421                                 status = CAM_RESRC_UNAVAIL;
 3422                         } else {
 3423                                 target = new_target;
 3424                         }
 3425                 }
 3426                 if (target != NULL) {
 3427                         device = xpt_find_device(target, lun_id);
 3428                         if (device == NULL) {
 3429                                 /* Create one */
 3430                                 struct cam_ed *new_device;
 3431 
 3432                                 new_device =
 3433                                     (*(bus->xport->alloc_device))(bus,
 3434                                                                       target,
 3435                                                                       lun_id);
 3436                                 if (new_device == NULL) {
 3437                                         status = CAM_RESRC_UNAVAIL;
 3438                                 } else {
 3439                                         device = new_device;
 3440                                 }
 3441                         }
 3442                 }
 3443         }
 3444 
 3445         /*
 3446          * Only touch the user's data if we are successful.
 3447          */
 3448         if (status == CAM_REQ_CMP) {
 3449                 new_path->periph = perph;
 3450                 new_path->bus = bus;
 3451                 new_path->target = target;
 3452                 new_path->device = device;
 3453                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 3454         } else {
 3455                 if (device != NULL)
 3456                         xpt_release_device(device);
 3457                 if (target != NULL)
 3458                         xpt_release_target(target);
 3459                 if (bus != NULL)
 3460                         xpt_release_bus(bus);
 3461         }
 3462         return (status);
 3463 }
 3464 
 3465 void
 3466 xpt_release_path(struct cam_path *path)
 3467 {
 3468         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 3469         if (path->device != NULL) {
 3470                 xpt_release_device(path->device);
 3471                 path->device = NULL;
 3472         }
 3473         if (path->target != NULL) {
 3474                 xpt_release_target(path->target);
 3475                 path->target = NULL;
 3476         }
 3477         if (path->bus != NULL) {
 3478                 xpt_release_bus(path->bus);
 3479                 path->bus = NULL;
 3480         }
 3481 }
 3482 
 3483 void
 3484 xpt_free_path(struct cam_path *path)
 3485 {
 3486 
 3487         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 3488         xpt_release_path(path);
 3489         free(path, M_CAMXPT);
 3490 }
 3491 
 3492 void
 3493 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
 3494     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
 3495 {
 3496 
 3497         mtx_lock(&xsoftc.xpt_topo_lock);
 3498         if (bus_ref) {
 3499                 if (path->bus)
 3500                         *bus_ref = path->bus->refcount;
 3501                 else
 3502                         *bus_ref = 0;
 3503         }
 3504         mtx_unlock(&xsoftc.xpt_topo_lock);
 3505         if (periph_ref) {
 3506                 if (path->periph)
 3507                         *periph_ref = path->periph->refcount;
 3508                 else
 3509                         *periph_ref = 0;
 3510         }
 3511         if (target_ref) {
 3512                 if (path->target)
 3513                         *target_ref = path->target->refcount;
 3514                 else
 3515                         *target_ref = 0;
 3516         }
 3517         if (device_ref) {
 3518                 if (path->device)
 3519                         *device_ref = path->device->refcount;
 3520                 else
 3521                         *device_ref = 0;
 3522         }
 3523 }
 3524 
 3525 /*
 3526  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 3527  * in path1, 2 for match with wildcards in path2.
 3528  */
 3529 int
 3530 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 3531 {
 3532         int retval = 0;
 3533 
 3534         if (path1->bus != path2->bus) {
 3535                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 3536                         retval = 1;
 3537                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 3538                         retval = 2;
 3539                 else
 3540                         return (-1);
 3541         }
 3542         if (path1->target != path2->target) {
 3543                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 3544                         if (retval == 0)
 3545                                 retval = 1;
 3546                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 3547                         retval = 2;
 3548                 else
 3549                         return (-1);
 3550         }
 3551         if (path1->device != path2->device) {
 3552                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 3553                         if (retval == 0)
 3554                                 retval = 1;
 3555                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 3556                         retval = 2;
 3557                 else
 3558                         return (-1);
 3559         }
 3560         return (retval);
 3561 }
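/*
 * [Editor's example, not in the original source.]  Typical use in an
 * async callback, treating any wildcard overlap as a match:
 *
 *      if (xpt_path_comp(softc->path, event_path) >= 0)
 *              ...the event applies to this instance...
 */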
 3562 
 3563 void
 3564 xpt_print_path(struct cam_path *path)
 3565 {
 3566 
 3567         if (path == NULL)
 3568                 printf("(nopath): ");
 3569         else {
 3570                 if (path->periph != NULL)
 3571                         printf("(%s%d:", path->periph->periph_name,
 3572                                path->periph->unit_number);
 3573                 else
 3574                         printf("(noperiph:");
 3575 
 3576                 if (path->bus != NULL)
 3577                         printf("%s%d:%d:", path->bus->sim->sim_name,
 3578                                path->bus->sim->unit_number,
 3579                                path->bus->sim->bus_id);
 3580                 else
 3581                         printf("nobus:");
 3582 
 3583                 if (path->target != NULL)
 3584                         printf("%d:", path->target->target_id);
 3585                 else
 3586                         printf("X:");
 3587 
 3588                 if (path->device != NULL)
 3589                         printf("%d): ", path->device->lun_id);
 3590                 else
 3591                         printf("X): ");
 3592         }
 3593 }
 3594 
 3595 void
 3596 xpt_print(struct cam_path *path, const char *fmt, ...)
 3597 {
 3598         va_list ap;
 3599         xpt_print_path(path);
 3600         va_start(ap, fmt);
 3601         vprintf(fmt, ap);
 3602         va_end(ap);
 3603 }
 3604 
 3605 int
 3606 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 3607 {
 3608         struct sbuf sb;
 3609 
 3610 #ifdef INVARIANTS
 3611         if (path != NULL && path->bus != NULL)
 3612                 mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3613 #endif
 3614 
 3615         sbuf_new(&sb, str, str_len, 0);
 3616 
 3617         if (path == NULL)
 3618                 sbuf_printf(&sb, "(nopath): ");
 3619         else {
 3620                 if (path->periph != NULL)
 3621                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 3622                                     path->periph->unit_number);
 3623                 else
 3624                         sbuf_printf(&sb, "(noperiph:");
 3625 
 3626                 if (path->bus != NULL)
 3627                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 3628                                     path->bus->sim->unit_number,
 3629                                     path->bus->sim->bus_id);
 3630                 else
 3631                         sbuf_printf(&sb, "nobus:");
 3632 
 3633                 if (path->target != NULL)
 3634                         sbuf_printf(&sb, "%d:", path->target->target_id);
 3635                 else
 3636                         sbuf_printf(&sb, "X:");
 3637 
 3638                 if (path->device != NULL)
 3639                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
 3640                 else
 3641                         sbuf_printf(&sb, "X): ");
 3642         }
 3643         sbuf_finish(&sb);
 3644 
 3645         return(sbuf_len(&sb));
 3646 }
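/*
 * [Editor's example, not in the original source.]  Formatting a path
 * prefix into a private buffer instead of printing it directly:
 *
 *      char buf[64];
 *
 *      xpt_path_string(path, buf, sizeof(buf));
 *      printf("%scommand timed out\n", buf);
 */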
 3647 
 3648 path_id_t
 3649 xpt_path_path_id(struct cam_path *path)
 3650 {
 3651         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3652 
 3653         return(path->bus->path_id);
 3654 }
 3655 
 3656 target_id_t
 3657 xpt_path_target_id(struct cam_path *path)
 3658 {
 3659         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3660 
 3661         if (path->target != NULL)
 3662                 return (path->target->target_id);
 3663         else
 3664                 return (CAM_TARGET_WILDCARD);
 3665 }
 3666 
 3667 lun_id_t
 3668 xpt_path_lun_id(struct cam_path *path)
 3669 {
 3670         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3671 
 3672         if (path->device != NULL)
 3673                 return (path->device->lun_id);
 3674         else
 3675                 return (CAM_LUN_WILDCARD);
 3676 }
 3677 
 3678 struct cam_sim *
 3679 xpt_path_sim(struct cam_path *path)
 3680 {
 3681 
 3682         return (path->bus->sim);
 3683 }
 3684 
 3685 struct cam_periph*
 3686 xpt_path_periph(struct cam_path *path)
 3687 {
 3688         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3689 
 3690         return (path->periph);
 3691 }
 3692 
 3693 int
 3694 xpt_path_legacy_ata_id(struct cam_path *path)
 3695 {
 3696         struct cam_eb *bus;
 3697         int bus_id;
 3698 
 3699         if ((strcmp(path->bus->sim->sim_name, "ata") != 0) &&
 3700             strcmp(path->bus->sim->sim_name, "ahcich") != 0 &&
 3701             strcmp(path->bus->sim->sim_name, "mvsch") != 0 &&
 3702             strcmp(path->bus->sim->sim_name, "siisch") != 0)
 3703                 return (-1);
 3704 
 3705         if (strcmp(path->bus->sim->sim_name, "ata") == 0 &&
 3706             path->bus->sim->unit_number < 2) {
 3707                 bus_id = path->bus->sim->unit_number;
 3708         } else {
 3709                 bus_id = 2;
 3710                 xpt_lock_buses();
 3711                 TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
 3712                         if (bus == path->bus)
 3713                                 break;
 3714                         if ((strcmp(bus->sim->sim_name, "ata") == 0 &&
 3715                              bus->sim->unit_number >= 2) ||
 3716                             strcmp(bus->sim->sim_name, "ahcich") == 0 ||
 3717                             strcmp(bus->sim->sim_name, "mvsch") == 0 ||
 3718                             strcmp(bus->sim->sim_name, "siisch") == 0)
 3719                                 bus_id++;
 3720                 }
 3721                 xpt_unlock_buses();
 3722         }
 3723         if (path->target != NULL) {
 3724                 if (path->target->target_id < 2)
 3725                         return (bus_id * 2 + path->target->target_id);
 3726                 else
 3727                         return (-1);
 3728         } else
 3729                 return (bus_id * 2);
 3730 }
 3731 
 3732 /*
 3733  * Release a CAM control block for the caller.  Remit the cost of the structure
 3734  * to the device referenced by the path.  If this device had no 'credits'
 3735  * and peripheral drivers have registered async callbacks for this
 3736  * notification, call them now.
 3737  */
 3738 void
 3739 xpt_release_ccb(union ccb *free_ccb)
 3740 {
 3741         struct   cam_path *path;
 3742         struct   cam_ed *device;
 3743         struct   cam_eb *bus;
 3744         struct   cam_sim *sim;
 3745 
 3746         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 3747         path = free_ccb->ccb_h.path;
 3748         device = path->device;
 3749         bus = path->bus;
 3750         sim = bus->sim;
 3751 
 3752         mtx_assert(sim->mtx, MA_OWNED);
 3753 
 3754         cam_ccbq_release_opening(&device->ccbq);
 3755         if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) {
 3756                 device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
 3757                 cam_ccbq_resize(&device->ccbq,
 3758                     device->ccbq.dev_openings + device->ccbq.dev_active);
 3759         }
 3760         if (sim->ccb_count > sim->max_ccbs) {
 3761                 xpt_free_ccb(free_ccb);
 3762                 sim->ccb_count--;
 3763         } else {
 3764                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
 3765                     xpt_links.sle);
 3766         }
 3767         if (sim->devq == NULL) {
 3768                 return;
 3769         }
 3770         sim->devq->alloc_openings++;
 3771         sim->devq->alloc_active--;
 3772         if (device_is_alloc_queued(device) == 0)
 3773                 xpt_schedule_dev_allocq(bus, device);
 3774         xpt_run_dev_allocq(bus);
 3775 }
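/*
 * [Editor's example, not in the original source.]  A periph completion
 * routine returns its CCB here once status has been harvested; names
 * are hypothetical:
 *
 *      static void
 *      mydriver_done(struct cam_periph *periph, union ccb *done_ccb)
 *      {
 *              ...check done_ccb->ccb_h.status, biodone() the request...
 *              xpt_release_ccb(done_ccb);
 *      }
 */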
 3776 
 3777 /* Functions accessed by SIM drivers */
 3778 
 3779 static struct xpt_xport xport_default = {
 3780         .alloc_device = xpt_alloc_device_default,
 3781         .action = xpt_action_default,
 3782         .async = xpt_dev_async_default,
 3783 };
 3784 
 3785 /*
 3786  * A sim structure, listing the SIM entry points and instance
 3787  * identification info, is passed to xpt_bus_register to hook the SIM
 3788  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 3789  * for this new bus and places it in the array of busses and assigns
 3790  * it a path_id.  The path_id may be influenced by "hard wiring"
 3791  * information specified by the user.  Once interrupt services are
 3792  * available, the bus will be probed.
 3793  */
 3794 int32_t
 3795 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
 3796 {
 3797         struct cam_eb *new_bus;
 3798         struct cam_eb *old_bus;
 3799         struct ccb_pathinq cpi;
 3800         struct cam_path *path;
 3801         cam_status status;
 3802 
 3803         mtx_assert(sim->mtx, MA_OWNED);
 3804 
 3805         sim->bus_id = bus;
 3806         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 3807                                           M_CAMXPT, M_NOWAIT);
 3808         if (new_bus == NULL) {
 3809                 /* Couldn't satisfy request */
 3810                 return (CAM_RESRC_UNAVAIL);
 3811         }
 3812         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 3813         if (path == NULL) {
 3814                 free(new_bus, M_CAMXPT);
 3815                 return (CAM_RESRC_UNAVAIL);
 3816         }
 3817 
 3818         if (strcmp(sim->sim_name, "xpt") != 0) {
 3819                 sim->path_id =
 3820                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 3821         }
 3822 
 3823         TAILQ_INIT(&new_bus->et_entries);
 3824         new_bus->path_id = sim->path_id;
 3825         cam_sim_hold(sim);
 3826         new_bus->sim = sim;
 3827         timevalclear(&new_bus->last_reset);
 3828         new_bus->flags = 0;
 3829         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 3830         new_bus->generation = 0;
 3831 
 3832         mtx_lock(&xsoftc.xpt_topo_lock);
 3833         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3834         while (old_bus != NULL
 3835             && old_bus->path_id < new_bus->path_id)
 3836                 old_bus = TAILQ_NEXT(old_bus, links);
 3837         if (old_bus != NULL)
 3838                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 3839         else
 3840                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
 3841         xsoftc.bus_generation++;
 3842         mtx_unlock(&xsoftc.xpt_topo_lock);
 3843 
 3844         /*
 3845          * Set a default transport so that a PATH_INQ can be issued to
 3846          * the SIM.  This will then allow for probing and attaching of
 3847          * a more appropriate transport.
 3848          */
 3849         new_bus->xport = &xport_default;
 3850 
 3851         status = xpt_compile_path(path, /*periph*/NULL, sim->path_id,
 3852                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3853         if (status != CAM_REQ_CMP)
 3854                 printf("xpt_compile_path returned %d\n", status);
 3855 
 3856         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 3857         cpi.ccb_h.func_code = XPT_PATH_INQ;
 3858         xpt_action((union ccb *)&cpi);
 3859 
 3860         if (cpi.ccb_h.status == CAM_REQ_CMP) {
 3861                 switch (cpi.transport) {
 3862                 case XPORT_SPI:
 3863                 case XPORT_SAS:
 3864                 case XPORT_FC:
 3865                 case XPORT_USB:
 3866                 case XPORT_ISCSI:
 3867                 case XPORT_PPB:
 3868                         new_bus->xport = scsi_get_xport();
 3869                         break;
 3870                 case XPORT_ATA:
 3871                 case XPORT_SATA:
 3872                         new_bus->xport = ata_get_xport();
 3873                         break;
 3874                 default:
 3875                         new_bus->xport = &xport_default;
 3876                         break;
 3877                 }
 3878         }
 3879 
 3880         /* Notify interested parties */
 3881         if (sim->path_id != CAM_XPT_PATH_ID) {
 3882                 union   ccb *scan_ccb;
 3883 
 3884                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
 3885                 /* Initiate bus rescan. */
 3886                 scan_ccb = xpt_alloc_ccb_nowait();
 3887                 scan_ccb->ccb_h.path = path;
 3888                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
 3889                 scan_ccb->crcn.flags = 0;
 3890                 xpt_rescan(scan_ccb);
 3891         } else
 3892                 xpt_free_path(path);
 3893         return (CAM_SUCCESS);
 3894 }
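/*
 * [Editor's example, not in the original source.]  Skeleton of a SIM
 * driver attach using the registration described above; names are
 * hypothetical and error unwinding is omitted:
 *
 *      struct cam_devq *devq;
 *      struct cam_sim *sim;
 *
 *      devq = cam_simq_alloc(MYDRIVER_MAX_TRANS);
 *      sim = cam_sim_alloc(mydriver_action, mydriver_poll, "mydriver",
 *          softc, device_get_unit(dev), &softc->mtx,
 *          1, MYDRIVER_MAX_TRANS, devq);
 *      mtx_lock(&softc->mtx);
 *      if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS)
 *              ...fail...
 *      mtx_unlock(&softc->mtx);
 */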
 3895 
 3896 int32_t
 3897 xpt_bus_deregister(path_id_t pathid)
 3898 {
 3899         struct cam_path bus_path;
 3900         cam_status status;
 3901 
 3902         status = xpt_compile_path(&bus_path, NULL, pathid,
 3903                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3904         if (status != CAM_REQ_CMP)
 3905                 return (status);
 3906 
 3907         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 3908         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 3909 
 3910         /* Release the reference count held while registered. */
 3911         xpt_release_bus(bus_path.bus);
 3912         xpt_release_path(&bus_path);
 3913 
 3914         return (CAM_REQ_CMP);
 3915 }
 3916 
 3917 static path_id_t
 3918 xptnextfreepathid(void)
 3919 {
 3920         struct cam_eb *bus;
 3921         path_id_t pathid;
 3922         const char *strval;
 3923 
 3924         pathid = 0;
 3925         mtx_lock(&xsoftc.xpt_topo_lock);
 3926         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3927 retry:
 3928         /* Find an unoccupied pathid */
 3929         while (bus != NULL && bus->path_id <= pathid) {
 3930                 if (bus->path_id == pathid)
 3931                         pathid++;
 3932                 bus = TAILQ_NEXT(bus, links);
 3933         }
 3934         mtx_unlock(&xsoftc.xpt_topo_lock);
 3935 
 3936         /*
 3937          * Ensure that this pathid is not reserved for
 3938          * a bus that may be registered in the future.
 3939          */
 3940         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 3941                 ++pathid;
 3942                 /* Start the search over */
 3943                 mtx_lock(&xsoftc.xpt_topo_lock);
 3944                 goto retry;
 3945         }
 3946         return (pathid);
 3947 }
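      /*
       * Annotation: the loop above returns the lowest path id not held by a
       * registered bus, and the resource_string_value() probe then rejects
       * ids that a "scbus" hint reserves for a bus that may register later.
       * Because the bus list is sorted by path_id and the candidate only
       * grows, resuming the scan after "retry" from the saved position is
       * safe.
       */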
 3948 
 3949 static path_id_t
 3950 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 3951 {
 3952         path_id_t pathid;
 3953         int i, dunit, val;
 3954         char buf[32];
 3955         const char *dname;
 3956 
 3957         pathid = CAM_XPT_PATH_ID;
 3958         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 3959         i = 0;
 3960         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 3961                 if (strcmp(dname, "scbus")) {
 3962                         /* Only "scbus" entries wire a SCSI bus; skip the rest. */
 3963                         continue;
 3964                 }
 3965                 if (dunit < 0)          /* unwired?! */
 3966                         continue;
 3967                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 3968                         if (sim_bus == val) {
 3969                                 pathid = dunit;
 3970                                 break;
 3971                         }
 3972                 } else if (sim_bus == 0) {
 3973                         /* Unspecified matches bus 0 */
 3974                         pathid = dunit;
 3975                         break;
 3976                 } else {
 3977                         printf("Ambiguous scbus configuration for %s%d "
 3978                                "bus %d, cannot wire down.  The kernel "
 3979                                "config entry for scbus%d should "
 3980                                "specify a controller bus.\n"
 3981                                "Scbus will be assigned dynamically.\n",
 3982                                sim_name, sim_unit, sim_bus, dunit);
 3983                         break;
 3984                 }
 3985         }
 3986 
 3987         if (pathid == CAM_XPT_PATH_ID)
 3988                 pathid = xptnextfreepathid();
 3989         return (pathid);
 3990 }
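      /*
       * Annotation: the "scbus" resources consulted above come from the
       * static kernel configuration or /boot/device.hints.  A sketch of
       * hint lines that would wire bus 0 of a hypothetical ahc0 controller
       * to scbus0:
       *
       *      hint.scbus.0.at="ahc0"
       *      hint.scbus.0.bus="0"
       */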
 3991 
 3992 static const char *
 3993 xpt_async_string(u_int32_t async_code)
 3994 {
 3995 
 3996         switch (async_code) {
 3997         case AC_BUS_RESET: return ("AC_BUS_RESET");
 3998         case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
 3999         case AC_SCSI_AEN: return ("AC_SCSI_AEN");
 4000         case AC_SENT_BDR: return ("AC_SENT_BDR");
 4001         case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
 4002         case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
 4003         case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
 4004         case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
 4005         case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
 4006         case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
 4007         case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
 4008         case AC_CONTRACT: return ("AC_CONTRACT");
 4009         }
 4010         return ("AC_UNKNOWN");
 4011 }
 4012 
 4013 void
 4014 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 4015 {
 4016         struct cam_eb *bus;
 4017         struct cam_et *target, *next_target;
 4018         struct cam_ed *device, *next_device;
 4019 
 4020         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4021         CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
 4022             ("xpt_async(%s)\n", xpt_async_string(async_code)));
 4023 
 4024         /*
 4025          * Most async events come from a CAM interrupt context.  In
 4026          * a few cases, the error recovery code at the peripheral layer,
 4027          * which may run from our SWI or a process context, may signal
 4028          * deferred events with a call to xpt_async.
 4029          */
 4030 
 4031         bus = path->bus;
 4032 
 4033         if (async_code == AC_BUS_RESET) {
 4034                 /* Update our notion of when the last reset occurred */
 4035                 microtime(&bus->last_reset);
 4036         }
 4037 
 4038         for (target = TAILQ_FIRST(&bus->et_entries);
 4039              target != NULL;
 4040              target = next_target) {
 4041 
 4042                 next_target = TAILQ_NEXT(target, links);
 4043 
 4044                 if (path->target != target
 4045                  && path->target->target_id != CAM_TARGET_WILDCARD
 4046                  && target->target_id != CAM_TARGET_WILDCARD)
 4047                         continue;
 4048 
 4049                 if (async_code == AC_SENT_BDR) {
 4050                         /* Update our notion of when the last reset occurred */
 4051                         microtime(&path->target->last_reset);
 4052                 }
 4053 
 4054                 for (device = TAILQ_FIRST(&target->ed_entries);
 4055                      device != NULL;
 4056                      device = next_device) {
 4057 
 4058                         next_device = TAILQ_NEXT(device, links);
 4059 
 4060                         if (path->device != device
 4061                          && path->device->lun_id != CAM_LUN_WILDCARD
 4062                          && device->lun_id != CAM_LUN_WILDCARD)
 4063                                 continue;
 4064                         /*
 4065                          * The async callback could free the device.
 4066                          * If it is a broadcast async, it doesn't hold
 4067                          * a device reference, so take our own.
 4068                          */
 4069                         xpt_acquire_device(device);
 4070                         (*(bus->xport->async))(async_code, bus,
 4071                                                target, device,
 4072                                                async_arg);
 4073 
 4074                         xpt_async_bcast(&device->asyncs, async_code,
 4075                                         path, async_arg);
 4076                         xpt_release_device(device);
 4077                 }
 4078         }
 4079 
 4080         /*
 4081          * If this wasn't a fully wildcarded async, tell all
 4082          * clients that want all async events.
 4083          */
 4084         if (bus != xpt_periph->path->bus)
 4085                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 4086                                 path, async_arg);
 4087 }
 4088 
 4089 static void
 4090 xpt_async_bcast(struct async_list *async_head,
 4091                 u_int32_t async_code,
 4092                 struct cam_path *path, void *async_arg)
 4093 {
 4094         struct async_node *cur_entry;
 4095 
 4096         cur_entry = SLIST_FIRST(async_head);
 4097         while (cur_entry != NULL) {
 4098                 struct async_node *next_entry;
 4099                 /*
 4100                  * Grab the next list entry before we call the current
 4101                  * entry's callback.  This is because the callback function
 4102                  * can delete its async callback entry.
 4103                  */
 4104                 next_entry = SLIST_NEXT(cur_entry, links);
 4105                 if ((cur_entry->event_enable & async_code) != 0)
 4106                         cur_entry->callback(cur_entry->callback_arg,
 4107                                             async_code, path,
 4108                                             async_arg);
 4109                 cur_entry = next_entry;
 4110         }
 4111 }
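      /*
       * Annotation: the grab-next-before-callback idiom above is what
       * <sys/queue.h> packages as SLIST_FOREACH_SAFE.  A sketch of the
       * equivalent loop over the same list:
       */
      #if 0
              struct async_node *cur_entry, *next_entry;

              SLIST_FOREACH_SAFE(cur_entry, async_head, links, next_entry) {
                      if ((cur_entry->event_enable & async_code) != 0)
                              cur_entry->callback(cur_entry->callback_arg,
                                  async_code, path, async_arg);
              }
      #endif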
 4112 
 4113 static void
 4114 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
 4115                       struct cam_et *target, struct cam_ed *device,
 4116                       void *async_arg)
 4117 {
 4118         printf("%s called\n", __func__);
 4119 }
 4120 
 4121 u_int32_t
 4122 xpt_freeze_devq_rl(struct cam_path *path, cam_rl rl, u_int count)
 4123 {
 4124         struct cam_ed *dev = path->device;
 4125 
 4126         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4127         dev->sim->devq->alloc_openings +=
 4128             cam_ccbq_freeze(&dev->ccbq, rl, count);
 4129         /* Remove frozen device from allocq. */
 4130         if (device_is_alloc_queued(dev) &&
 4131             cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
 4132              CAMQ_GET_PRIO(&dev->drvq)))) {
 4133                 camq_remove(&dev->sim->devq->alloc_queue,
 4134                     dev->alloc_ccb_entry.pinfo.index);
 4135         }
 4136         /* Remove frozen device from sendq. */
 4137         if (device_is_send_queued(dev) &&
 4138             cam_ccbq_frozen_top(&dev->ccbq)) {
 4139                 camq_remove(&dev->sim->devq->send_queue,
 4140                     dev->send_ccb_entry.pinfo.index);
 4141         }
 4142         return (dev->ccbq.queue.qfrozen_cnt[rl]);
 4143 }
 4144 
 4145 u_int32_t
 4146 xpt_freeze_devq(struct cam_path *path, u_int count)
 4147 {
 4148 
 4149         return (xpt_freeze_devq_rl(path, 0, count));
 4150 }
 4151 
 4152 u_int32_t
 4153 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4154 {
 4155 
 4156         mtx_assert(sim->mtx, MA_OWNED);
 4157         sim->devq->send_queue.qfrozen_cnt[0] += count;
 4158         return (sim->devq->send_queue.qfrozen_cnt[0]);
 4159 }
 4160 
 4161 static void
 4162 xpt_release_devq_timeout(void *arg)
 4163 {
 4164         struct cam_ed *device;
 4165 
 4166         device = (struct cam_ed *)arg;
 4167 
 4168         xpt_release_devq_device(device, /*rl*/0, /*count*/1, /*run_queue*/TRUE);
 4169 }
 4170 
 4171 void
 4172 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4173 {
 4174         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4175 
 4176         xpt_release_devq_device(path->device, /*rl*/0, count, run_queue);
 4177 }
 4178 
 4179 void
 4180 xpt_release_devq_rl(struct cam_path *path, cam_rl rl, u_int count, int run_queue)
 4181 {
 4182         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4183 
 4184         xpt_release_devq_device(path->device, rl, count, run_queue);
 4185 }
 4186 
 4187 static void
 4188 xpt_release_devq_device(struct cam_ed *dev, cam_rl rl, u_int count, int run_queue)
 4189 {
 4190 
 4191         if (count > dev->ccbq.queue.qfrozen_cnt[rl]) {
 4192 #ifdef INVARIANTS
 4193                 printf("xpt_release_devq(%d): requested %u > present %u\n",
 4194                     rl, count, dev->ccbq.queue.qfrozen_cnt[rl]);
 4195 #endif
 4196                 count = dev->ccbq.queue.qfrozen_cnt[rl];
 4197         }
 4198         dev->sim->devq->alloc_openings -=
 4199             cam_ccbq_release(&dev->ccbq, rl, count);
 4200         if (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
 4201             CAMQ_GET_PRIO(&dev->drvq))) == 0) {
 4202                 if (xpt_schedule_dev_allocq(dev->target->bus, dev))
 4203                         xpt_run_dev_allocq(dev->target->bus);
 4204         }
 4205         if (cam_ccbq_frozen_top(&dev->ccbq) == 0) {
 4206                 /*
 4207                  * No longer need to wait for a successful
 4208                  * command completion.
 4209                  */
 4210                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4211                 /*
 4212                  * Remove any timeouts that might be scheduled
 4213                  * to release this queue.
 4214                  */
 4215                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4216                         callout_stop(&dev->callout);
 4217                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4218                 }
 4219                 if (run_queue == 0)
 4220                         return;
 4221                 /*
 4222                  * Now that we are unfrozen schedule the
 4223                  * device so any pending transactions are
 4224                  * run.
 4225                  */
 4226                 if (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4227                         xpt_run_dev_sendq(dev->target->bus);
 4228         }
 4229 }
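      /*
       * Annotation: the usual producer of these freezes is a SIM error
       * path, which freezes the device queue and flags the CCB so the
       * owner of the request knows a release is owed.  A minimal sketch,
       * with CAM_REQUEUE_REQ standing in for whatever status the SIM
       * actually reports:
       */
      #if 0
              xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
              ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
              xpt_done(ccb);
      #endif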
 4230 
 4231 void
 4232 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4233 {
 4234         struct  camq *sendq;
 4235 
 4236         mtx_assert(sim->mtx, MA_OWNED);
 4237         sendq = &(sim->devq->send_queue);
 4238         if (sendq->qfrozen_cnt[0] <= 0) {
 4239 #ifdef INVARIANTS
 4240                 printf("xpt_release_simq: requested 1 > present %u\n",
 4241                     sendq->qfrozen_cnt[0]);
 4242 #endif
 4243         } else
 4244                 sendq->qfrozen_cnt[0]--;
 4245         if (sendq->qfrozen_cnt[0] == 0) {
 4246                 /*
 4247                  * If there is a timeout scheduled to release this
 4248                  * sim queue, remove it.  The queue frozen count is
 4249                  * already at 0.
 4250                  */
 4251                 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4252                         callout_stop(&sim->callout);
 4253                         sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4254                 }
 4255                 if (run_queue) {
 4256                         struct cam_eb *bus;
 4257 
 4258                         /*
 4259                          * Now that we are unfrozen run the send queue.
 4260                          */
 4261                         bus = xpt_find_bus(sim->path_id);
 4262                         xpt_run_dev_sendq(bus);
 4263                         xpt_release_bus(bus);
 4264                 }
 4265         }
 4266 }
 4267 
 4268 /*
 4269  * XXX Appears to be unused.
 4270  */
 4271 static void
 4272 xpt_release_simq_timeout(void *arg)
 4273 {
 4274         struct cam_sim *sim;
 4275 
 4276         sim = (struct cam_sim *)arg;
 4277         xpt_release_simq(sim, /* run_queue */ TRUE);
 4278 }
 4279 
 4280 void
 4281 xpt_done(union ccb *done_ccb)
 4282 {
 4283         struct cam_sim *sim;
 4284         int     first;
 4285 
 4286         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4287         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4288                 /*
 4289                  * Queue up the request so that our SWI handler can
 4290                  * process any of the "non-immediate" types of ccbs.
 4291                  */
 4292                 sim = done_ccb->ccb_h.path->bus->sim;
 4293                 TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
 4294                     sim_links.tqe);
 4295                 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4296                 if ((sim->flags & (CAM_SIM_ON_DONEQ | CAM_SIM_POLLED |
 4297                     CAM_SIM_BATCH)) == 0) {
 4298                         mtx_lock(&cam_simq_lock);
 4299                         first = TAILQ_EMPTY(&cam_simq);
 4300                         TAILQ_INSERT_TAIL(&cam_simq, sim, links);
 4301                         mtx_unlock(&cam_simq_lock);
 4302                         sim->flags |= CAM_SIM_ON_DONEQ;
 4303                         if (first)
 4304                                 swi_sched(cambio_ih, 0);
 4305                 }
 4306         }
 4307 }
 4308 
 4309 void
 4310 xpt_batch_start(struct cam_sim *sim)
 4311 {
 4312 
 4313         KASSERT((sim->flags & CAM_SIM_BATCH) == 0, ("Batch flag already set"));
 4314         sim->flags |= CAM_SIM_BATCH;
 4315 }
 4316 
 4317 void
 4318 xpt_batch_done(struct cam_sim *sim)
 4319 {
 4320 
 4321         KASSERT((sim->flags & CAM_SIM_BATCH) != 0, ("Batch flag was not set"));
 4322         sim->flags &= ~CAM_SIM_BATCH;
 4323         if (!TAILQ_EMPTY(&sim->sim_doneq) &&
 4324             (sim->flags & CAM_SIM_ON_DONEQ) == 0)
 4325                 camisr_runqueue(&sim->sim_doneq);
 4326 }
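      /*
       * Annotation: a sketch of a SIM interrupt handler bracketing a burst
       * of completions with the batch calls above, so the done queue is
       * drained once from xpt_batch_done() instead of bouncing each CCB
       * through the SWI (the mydrv_* names are hypothetical):
       */
      #if 0
      static void
      mydrv_intr(void *arg)
      {
              struct mydrv_softc *sc = arg;
              union ccb *ccb;

              mtx_lock(&sc->mtx);
              xpt_batch_start(sc->sim);
              while (mydrv_next_completion(sc, &ccb))    /* hypothetical */
                      xpt_done(ccb);
              xpt_batch_done(sc->sim);
              mtx_unlock(&sc->mtx);
      }
      #endif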
 4327 
 4328 union ccb *
 4329 xpt_alloc_ccb(void)
 4330 {
 4331         union ccb *new_ccb;
 4332 
 4333         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK);
 4334         return (new_ccb);
 4335 }
 4336 
 4337 union ccb *
 4338 xpt_alloc_ccb_nowait(void)
 4339 {
 4340         union ccb *new_ccb;
 4341 
 4342         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT);
 4343         return (new_ccb);
 4344 }
 4345 
 4346 void
 4347 xpt_free_ccb(union ccb *free_ccb)
 4348 {
 4349         free(free_ccb, M_CAMXPT);
 4350 }
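      /*
       * Annotation: a minimal sketch of the allocate/issue/free cycle for
       * an immediate CCB, using the sleepable allocator above on an
       * existing path:
       */
      #if 0
              union ccb *ccb;

              ccb = xpt_alloc_ccb();
              xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NORMAL);
              ccb->ccb_h.func_code = XPT_PATH_INQ;
              xpt_action(ccb);
              if (ccb->ccb_h.status == CAM_REQ_CMP)
                      printf("transport %d\n", ccb->cpi.transport);
              xpt_free_ccb(ccb);
      #endif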
 4351 
 4352 
 4353 
 4354 /* Private XPT functions */
 4355 
 4356 /*
 4357  * Get a CAM control block for the caller. Charge the structure to the device
 4358  * referenced by the path.  If this device has no 'credits' then the
 4359  * device already has the maximum number of outstanding operations under way
 4360  * and we return NULL. If we don't have sufficient resources to allocate more
 4361  * ccbs, we also return NULL.
 4362  */
 4363 static union ccb *
 4364 xpt_get_ccb(struct cam_ed *device)
 4365 {
 4366         union ccb *new_ccb;
 4367         struct cam_sim *sim;
 4368 
 4369         sim = device->sim;
 4370         if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
 4371                 new_ccb = xpt_alloc_ccb_nowait();
 4372                 if (new_ccb == NULL) {
 4373                         return (NULL);
 4374                 }
 4375                 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
 4376                         callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 4377                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
 4378                                   xpt_links.sle);
 4379                 sim->ccb_count++;
 4380         }
 4381         cam_ccbq_take_opening(&device->ccbq);
 4382         SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
 4383         return (new_ccb);
 4384 }
 4385 
 4386 static void
 4387 xpt_release_bus(struct cam_eb *bus)
 4388 {
 4389 
 4390         mtx_lock(&xsoftc.xpt_topo_lock);
 4391         KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
 4392         if ((--bus->refcount == 0)
 4393          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
 4394                 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
 4395                 xsoftc.bus_generation++;
 4396                 mtx_unlock(&xsoftc.xpt_topo_lock);
 4397                 cam_sim_release(bus->sim);
 4398                 free(bus, M_CAMXPT);
 4399         } else
 4400                 mtx_unlock(&xsoftc.xpt_topo_lock);
 4401 }
 4402 
 4403 static struct cam_et *
 4404 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4405 {
 4406         struct cam_et *target;
 4407 
 4408         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
 4409         if (target != NULL) {
 4410                 struct cam_et *cur_target;
 4411 
 4412                 TAILQ_INIT(&target->ed_entries);
 4413                 target->bus = bus;
 4414                 target->target_id = target_id;
 4415                 target->refcount = 1;
 4416                 target->generation = 0;
 4417                 target->luns = NULL;
 4418                 timevalclear(&target->last_reset);
 4419                 /*
 4420                  * Hold a reference to our parent bus so it
 4421                  * will not go away before we do.
 4422                  */
 4423                 mtx_lock(&xsoftc.xpt_topo_lock);
 4424                 bus->refcount++;
 4425                 mtx_unlock(&xsoftc.xpt_topo_lock);
 4426 
 4427                 /* Insertion sort into our bus's target list */
 4428                 cur_target = TAILQ_FIRST(&bus->et_entries);
 4429                 while (cur_target != NULL && cur_target->target_id < target_id)
 4430                         cur_target = TAILQ_NEXT(cur_target, links);
 4431 
 4432                 if (cur_target != NULL) {
 4433                         TAILQ_INSERT_BEFORE(cur_target, target, links);
 4434                 } else {
 4435                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4436                 }
 4437                 bus->generation++;
 4438         }
 4439         return (target);
 4440 }
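      /*
       * Annotation: the insertion sort above, restated with TAILQ_FOREACH
       * from <sys/queue.h>; purely illustrative, same types and links:
       */
      #if 0
              struct cam_et *cur_target;

              TAILQ_FOREACH(cur_target, &bus->et_entries, links)
                      if (cur_target->target_id >= target_id)
                              break;
              if (cur_target != NULL)
                      TAILQ_INSERT_BEFORE(cur_target, target, links);
              else
                      TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
      #endif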
 4441 
 4442 static void
 4443 xpt_release_target(struct cam_et *target)
 4444 {
 4445 
 4446         if (target->refcount == 1) {
 4447                 if (TAILQ_FIRST(&target->ed_entries) == NULL) {
 4448                         TAILQ_REMOVE(&target->bus->et_entries, target, links);
 4449                         target->bus->generation++;
 4450                         xpt_release_bus(target->bus);
 4451                         if (target->luns)
 4452                                 free(target->luns, M_CAMXPT);
 4453                         free(target, M_CAMXPT);
 4454                 }
 4455         } else
 4456                 target->refcount--;
 4457 }
 4458 
 4459 static struct cam_ed *
 4460 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
 4461                          lun_id_t lun_id)
 4462 {
 4463         struct cam_ed *device, *cur_device;
 4464 
 4465         device = xpt_alloc_device(bus, target, lun_id);
 4466         if (device == NULL)
 4467                 return (NULL);
 4468 
 4469         device->mintags = 1;
 4470         device->maxtags = 1;
 4471         bus->sim->max_ccbs += device->ccbq.devq_openings;
 4472         cur_device = TAILQ_FIRST(&target->ed_entries);
 4473         while (cur_device != NULL && cur_device->lun_id < lun_id)
 4474                 cur_device = TAILQ_NEXT(cur_device, links);
 4475         if (cur_device != NULL) {
 4476                 TAILQ_INSERT_BEFORE(cur_device, device, links);
 4477         } else {
 4478                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 4479         }
 4480         target->generation++;
 4481 
 4482         return (device);
 4483 }
 4484 
 4485 struct cam_ed *
 4486 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4487 {
 4488         struct     cam_ed *device;
 4489         struct     cam_devq *devq;
 4490         cam_status status;
 4491 
 4492         /* Make space for us in the device queue on our bus */
 4493         devq = bus->sim->devq;
 4494         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 4495 
 4496         if (status != CAM_REQ_CMP) {
 4497                 device = NULL;
 4498         } else {
 4499                 device = (struct cam_ed *)malloc(sizeof(*device),
 4500                                                  M_CAMXPT, M_NOWAIT);
 4501         }
 4502 
 4503         if (device != NULL) {
 4504                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 4505                 device->alloc_ccb_entry.device = device;
 4506                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
 4507                 device->send_ccb_entry.device = device;
 4508                 device->target = target;
 4509                 device->lun_id = lun_id;
 4510                 device->sim = bus->sim;
 4511                 /* Initialize our queues */
 4512                 if (camq_init(&device->drvq, 0) != 0) {
 4513                         free(device, M_CAMXPT);
 4514                         return (NULL);
 4515                 }
 4516                 if (cam_ccbq_init(&device->ccbq,
 4517                                   bus->sim->max_dev_openings) != 0) {
 4518                         camq_fini(&device->drvq);
 4519                         free(device, M_CAMXPT);
 4520                         return (NULL);
 4521                 }
 4522                 SLIST_INIT(&device->asyncs);
 4523                 SLIST_INIT(&device->periphs);
 4524                 device->generation = 0;
 4525                 device->owner = NULL;
 4526                 device->flags = CAM_DEV_UNCONFIGURED;
 4527                 device->tag_delay_count = 0;
 4528                 device->tag_saved_openings = 0;
 4529                 device->refcount = 1;
 4530                 callout_init_mtx(&device->callout, bus->sim->mtx, 0);
 4531 
 4532                 /*
 4533                  * Hold a reference to our parent target so it
 4534                  * will not go away before we do.
 4535                  */
 4536                 target->refcount++;
 4537 
 4538         }
 4539         return (device);
 4540 }
 4541 
 4542 void
 4543 xpt_acquire_device(struct cam_ed *device)
 4544 {
 4545 
 4546         device->refcount++;
 4547 }
 4548 
 4549 void
 4550 xpt_release_device(struct cam_ed *device)
 4551 {
 4552 
 4553         if (device->refcount == 1) {
 4554                 struct cam_devq *devq;
 4555 
 4556                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 4557                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 4558                         panic("Removing device while still queued for ccbs");
 4559 
 4560                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 4561                         callout_stop(&device->callout);
 4562 
 4563                 TAILQ_REMOVE(&device->target->ed_entries, device, links);
 4564                 device->target->generation++;
 4565                 device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings;
 4566                 /* Release our slot in the devq */
 4567                 devq = device->target->bus->sim->devq;
 4568                 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 4569                 camq_fini(&device->drvq);
 4570                 cam_ccbq_fini(&device->ccbq);
 4571                 xpt_release_target(device->target);
 4572                 free(device, M_CAMXPT);
 4573         } else
 4574                 device->refcount--;
 4575 }
 4576 
 4577 u_int32_t
 4578 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 4579 {
 4580         int     diff;
 4581         int     result;
 4582         struct  cam_ed *dev;
 4583 
 4584         dev = path->device;
 4585 
 4586         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 4587         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 4588         if (result == CAM_REQ_CMP && (diff < 0)) {
 4589                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 4590         }
 4591         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4592          || (dev->inq_flags & SID_CmdQue) != 0)
 4593                 dev->tag_saved_openings = newopenings;
 4594         /* Adjust the global limit */
 4595         dev->sim->max_ccbs += diff;
 4596         return (result);
 4597 }
 4598 
 4599 static struct cam_eb *
 4600 xpt_find_bus(path_id_t path_id)
 4601 {
 4602         struct cam_eb *bus;
 4603 
 4604         mtx_lock(&xsoftc.xpt_topo_lock);
 4605         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4606              bus != NULL;
 4607              bus = TAILQ_NEXT(bus, links)) {
 4608                 if (bus->path_id == path_id) {
 4609                         bus->refcount++;
 4610                         break;
 4611                 }
 4612         }
 4613         mtx_unlock(&xsoftc.xpt_topo_lock);
 4614         return (bus);
 4615 }
 4616 
 4617 static struct cam_et *
 4618 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 4619 {
 4620         struct cam_et *target;
 4621 
 4622         for (target = TAILQ_FIRST(&bus->et_entries);
 4623              target != NULL;
 4624              target = TAILQ_NEXT(target, links)) {
 4625                 if (target->target_id == target_id) {
 4626                         target->refcount++;
 4627                         break;
 4628                 }
 4629         }
 4630         return (target);
 4631 }
 4632 
 4633 static struct cam_ed *
 4634 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 4635 {
 4636         struct cam_ed *device;
 4637 
 4638         for (device = TAILQ_FIRST(&target->ed_entries);
 4639              device != NULL;
 4640              device = TAILQ_NEXT(device, links)) {
 4641                 if (device->lun_id == lun_id) {
 4642                         device->refcount++;
 4643                         break;
 4644                 }
 4645         }
 4646         return (device);
 4647 }
 4648 
 4649 void
 4650 xpt_start_tags(struct cam_path *path)
 4651 {
 4652         struct ccb_relsim crs;
 4653         struct cam_ed *device;
 4654         struct cam_sim *sim;
 4655         int    newopenings;
 4656 
 4657         device = path->device;
 4658         sim = path->bus->sim;
 4659         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4660         xpt_freeze_devq(path, /*count*/1);
 4661         device->inq_flags |= SID_CmdQue;
 4662         if (device->tag_saved_openings != 0)
 4663                 newopenings = device->tag_saved_openings;
 4664         else
 4665                 newopenings = min(device->maxtags,
 4666                                   sim->max_tagged_dev_openings);
 4667         xpt_dev_ccbq_resize(path, newopenings);
 4668         xpt_async(AC_GETDEV_CHANGED, path, NULL);
 4669         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4670         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4671         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4672         crs.openings
 4673             = crs.release_timeout
 4674             = crs.qfrozen_cnt
 4675             = 0;
 4676         xpt_action((union ccb *)&crs);
 4677 }
 4678 
 4679 void
 4680 xpt_stop_tags(struct cam_path *path)
 4681 {
 4682         struct ccb_relsim crs;
 4683         struct cam_ed *device;
 4684         struct cam_sim *sim;
 4685 
 4686         device = path->device;
 4687         sim = path->bus->sim;
 4688         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4689         device->tag_delay_count = 0;
 4690         xpt_freeze_devq(path, /*count*/1);
 4691         device->inq_flags &= ~SID_CmdQue;
 4692         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
 4693         xpt_async(AC_GETDEV_CHANGED, path, NULL);
 4694         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4695         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4696         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4697         crs.openings
 4698             = crs.release_timeout
 4699             = crs.qfrozen_cnt
 4700             = 0;
 4701         xpt_action((union ccb *)&crs);
 4702 }
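      /*
       * Annotation: both tag-mode switches above freeze the device queue
       * and then hand the SIM an XPT_REL_SIMQ with
       * RELSIM_RELEASE_AFTER_QEMPTY, so the freeze taken here is only
       * dropped once the transactions issued under the old tag settings
       * have drained.
       */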
 4703 
 4704 static void
 4705 xpt_boot_delay(void *arg)
 4706 {
 4707 
 4708         xpt_release_boot();
 4709 }
 4710 
 4711 static void
 4712 xpt_config(void *arg)
 4713 {
 4714         /*
 4715          * Now that interrupts are enabled, go find our devices
 4716          */
 4717 
 4718         /* Setup debugging path */
 4719         if (cam_dflags != CAM_DEBUG_NONE) {
 4720                 /*
 4721                  * Locking is specifically omitted here.  No SIMs have
 4722                  * registered yet, so xpt_create_path will only be searching
 4723                  * empty lists of targets and devices.
 4724                  */
 4725                 if (xpt_create_path(&cam_dpath, xpt_periph,
 4726                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 4727                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 4728                         printf("xpt_config: xpt_create_path() failed for debug"
 4729                                " target %d:%d:%d, debugging disabled\n",
 4730                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 4731                         cam_dflags = CAM_DEBUG_NONE;
 4732                 }
 4733         } else
 4734                 cam_dpath = NULL;
 4735 
 4736         periphdriver_init(1);
 4737         xpt_hold_boot();
 4738         callout_init(&xsoftc.boot_callout, 1);
 4739         callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
 4740             xpt_boot_delay, NULL);
 4741         /* Fire up rescan thread. */
 4742         if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
 4743                 printf("xpt_init: failed to create rescan thread\n");
 4744         }
 4745 }
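      /*
       * Annotation: xsoftc.boot_delay is in milliseconds (note the hz
       * scaling above); elsewhere in this file it is exposed as the
       * kern.cam.boot_delay tunable (an assumption about this tree).  The
       * hold/release pair below is what actually gates the boot until
       * every registered bus has completed its initial scan or the delay
       * expires.
       */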
 4746 
 4747 void
 4748 xpt_hold_boot(void)
 4749 {
 4750         xpt_lock_buses();
 4751         xsoftc.buses_to_config++;
 4752         xpt_unlock_buses();
 4753 }
 4754 
 4755 void
 4756 xpt_release_boot(void)
 4757 {
 4758         xpt_lock_buses();
 4759         xsoftc.buses_to_config--;
 4760         if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
 4761                 struct  xpt_task *task;
 4762 
 4763                 xsoftc.buses_config_done = 1;
 4764                 xpt_unlock_buses();
 4765                 /* Call manually because we don't have any busses */
 4766                 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
 4767                 if (task != NULL) {
 4768                         TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
 4769                         taskqueue_enqueue(taskqueue_thread, &task->task);
 4770                 }
 4771         } else
 4772                 xpt_unlock_buses();
 4773 }
 4774 
 4775 /*
 4776  * If the given device only has one peripheral attached to it, and if that
 4777  * peripheral is the passthrough driver, announce it.  This ensures that the
 4778  * user sees some sort of announcement for every peripheral in their system.
 4779  */
 4780 static int
 4781 xptpassannouncefunc(struct cam_ed *device, void *arg)
 4782 {
 4783         struct cam_periph *periph;
 4784         int i;
 4785 
 4786         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 4787              periph = SLIST_NEXT(periph, periph_links), i++);
 4788 
 4789         periph = SLIST_FIRST(&device->periphs);
 4790         if ((i == 1)
 4791          && (strncmp(periph->periph_name, "pass", 4) == 0))
 4792                 xpt_announce_periph(periph, NULL);
 4793 
 4794         return(1);
 4795 }
 4796 
 4797 static void
 4798 xpt_finishconfig_task(void *context, int pending)
 4799 {
 4800 
 4801         periphdriver_init(2);
 4802         /*
 4803          * Check for devices with no "standard" peripheral driver
 4804          * attached.  For any devices like that, announce the
 4805          * passthrough driver so the user will see something.
 4806          */
 4807         xpt_for_all_devices(xptpassannouncefunc, NULL);
 4808 
 4809         /* Release our hook so that the boot can continue. */
 4810         config_intrhook_disestablish(xsoftc.xpt_config_hook);
 4811         free(xsoftc.xpt_config_hook, M_CAMXPT);
 4812         xsoftc.xpt_config_hook = NULL;
 4813 
 4814         free(context, M_CAMXPT);
 4815 }
 4816 
 4817 cam_status
 4818 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
 4819                    struct cam_path *path)
 4820 {
 4821         struct ccb_setasync csa;
 4822         cam_status status;
 4823         int xptpath = 0;
 4824 
 4825         if (path == NULL) {
 4826                 mtx_lock(&xsoftc.xpt_lock);
 4827                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
 4828                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4829                 if (status != CAM_REQ_CMP) {
 4830                         mtx_unlock(&xsoftc.xpt_lock);
 4831                         return (status);
 4832                 }
 4833                 xptpath = 1;
 4834         }
 4835 
 4836         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 4837         csa.ccb_h.func_code = XPT_SASYNC_CB;
 4838         csa.event_enable = event;
 4839         csa.callback = cbfunc;
 4840         csa.callback_arg = cbarg;
 4841         xpt_action((union ccb *)&csa);
 4842         status = csa.ccb_h.status;
 4843         if (xptpath) {
 4844                 xpt_free_path(path);
 4845                 mtx_unlock(&xsoftc.xpt_lock);
 4846 
 4847                 if ((status == CAM_REQ_CMP) &&
 4848                     (csa.event_enable & AC_FOUND_DEVICE)) {
 4849                         /*
 4850                          * Get this peripheral up to date with all
 4851                          * the currently existing devices.
 4852                          */
 4853                         xpt_for_all_devices(xptsetasyncfunc, &csa);
 4854                 }
 4855                 if ((status == CAM_REQ_CMP) &&
 4856                     (csa.event_enable & AC_PATH_REGISTERED)) {
 4857                         /*
 4858                          * Get this peripheral up to date with all
 4859                          * the currently existing busses.
 4860                          */
 4861                         xpt_for_all_busses(xptsetasyncbusfunc, &csa);
 4862                 }
 4863         }
 4864         return (status);
 4865 }
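      /*
       * Annotation: a sketch of a peripheral driver subscribing to device
       * arrivals through the function above.  myperiph_async and
       * myperiph_try_attach are hypothetical; AC_FOUND_DEVICE callbacks
       * receive a struct ccb_getdev through the final argument.
       */
      #if 0
      static void
      myperiph_async(void *cbarg, u_int32_t code, struct cam_path *path,
                     void *arg)
      {
              struct ccb_getdev *cgd = arg;

              if (code == AC_FOUND_DEVICE && cgd != NULL)
                      myperiph_try_attach(path, cgd);
      }

              ...
              status = xpt_register_async(AC_FOUND_DEVICE, myperiph_async,
                  softc, /*path*/NULL);
      #endif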
 4866 
 4867 static void
 4868 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 4869 {
 4870         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 4871 
 4872         switch (work_ccb->ccb_h.func_code) {
 4873         /* Common cases first */
 4874         case XPT_PATH_INQ:              /* Path routing inquiry */
 4875         {
 4876                 struct ccb_pathinq *cpi;
 4877 
 4878                 cpi = &work_ccb->cpi;
 4879                 cpi->version_num = 1; /* XXX??? */
 4880                 cpi->hba_inquiry = 0;
 4881                 cpi->target_sprt = 0;
 4882                 cpi->hba_misc = 0;
 4883                 cpi->hba_eng_cnt = 0;
 4884                 cpi->max_target = 0;
 4885                 cpi->max_lun = 0;
 4886                 cpi->initiator_id = 0;
 4887                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 4888                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
 4889                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 4890                 cpi->unit_number = sim->unit_number;
 4891                 cpi->bus_id = sim->bus_id;
 4892                 cpi->base_transfer_speed = 0;
 4893                 cpi->protocol = PROTO_UNSPECIFIED;
 4894                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
 4895                 cpi->transport = XPORT_UNSPECIFIED;
 4896                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
 4897                 cpi->ccb_h.status = CAM_REQ_CMP;
 4898                 xpt_done(work_ccb);
 4899                 break;
 4900         }
 4901         default:
 4902                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 4903                 xpt_done(work_ccb);
 4904                 break;
 4905         }
 4906 }
 4907 
 4908 /*
 4909  * The xpt as a "controller" has no interrupt sources, so polling
 4910  * is a no-op.
 4911  */
 4912 static void
 4913 xptpoll(struct cam_sim *sim)
 4914 {
 4915 }
 4916 
 4917 void
 4918 xpt_lock_buses(void)
 4919 {
 4920         mtx_lock(&xsoftc.xpt_topo_lock);
 4921 }
 4922 
 4923 void
 4924 xpt_unlock_buses(void)
 4925 {
 4926         mtx_unlock(&xsoftc.xpt_topo_lock);
 4927 }
 4928 
 4929 static void
 4930 camisr(void *dummy)
 4931 {
 4932         cam_simq_t queue;
 4933         struct cam_sim *sim;
 4934 
 4935         mtx_lock(&cam_simq_lock);
 4936         TAILQ_INIT(&queue);
 4937         while (!TAILQ_EMPTY(&cam_simq)) {
 4938                 TAILQ_CONCAT(&queue, &cam_simq, links);
 4939                 mtx_unlock(&cam_simq_lock);
 4940 
 4941                 while ((sim = TAILQ_FIRST(&queue)) != NULL) {
 4942                         TAILQ_REMOVE(&queue, sim, links);
 4943                         CAM_SIM_LOCK(sim);
 4944                         camisr_runqueue(&sim->sim_doneq);
 4945                         sim->flags &= ~CAM_SIM_ON_DONEQ;
 4946                         CAM_SIM_UNLOCK(sim);
 4947                 }
 4948                 mtx_lock(&cam_simq_lock);
 4949         }
 4950         mtx_unlock(&cam_simq_lock);
 4951 }
 4952 
 4953 static void
 4954 camisr_runqueue(void *V_queue)
 4955 {
 4956         cam_isrq_t *queue = V_queue;
 4957         struct  ccb_hdr *ccb_h;
 4958 
 4959         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
 4960                 int     runq;
 4961 
 4962                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
 4963                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 4964 
 4965                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
 4966                           ("camisr\n"));
 4967 
 4968                 runq = FALSE;
 4969 
 4970                 if (ccb_h->flags & CAM_HIGH_POWER) {
 4971                         struct highpowerlist    *hphead;
 4972                         union ccb               *send_ccb;
 4973 
 4974                         mtx_lock(&xsoftc.xpt_lock);
 4975                         hphead = &xsoftc.highpowerq;
 4976 
 4977                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
 4978 
 4979                         /*
 4980                          * Increment the count since this command is done.
 4981                          */
 4982                         xsoftc.num_highpower++;
 4983 
 4984                         /*
 4985                          * Any high powered commands queued up?
 4986                          */
 4987                         if (send_ccb != NULL) {
 4988 
 4989                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
 4990                                 mtx_unlock(&xsoftc.xpt_lock);
 4991 
 4992                                 xpt_release_devq(send_ccb->ccb_h.path,
 4993                                                  /*count*/1, /*runqueue*/TRUE);
 4994                         } else
 4995                                 mtx_unlock(&xsoftc.xpt_lock);
 4996                 }
 4997 
 4998                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 4999                         struct cam_ed *dev;
 5000 
 5001                         dev = ccb_h->path->device;
 5002 
 5003                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 5004                         ccb_h->path->bus->sim->devq->send_active--;
 5005                         ccb_h->path->bus->sim->devq->send_openings++;
 5006                         runq = TRUE;
 5007 
 5008                         if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 5009                           && (dev->ccbq.dev_active == 0))) {
 5010                                 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
 5011                                 xpt_release_devq(ccb_h->path, /*count*/1,
 5012                                                  /*run_queue*/FALSE);
 5013                         }
 5014 
 5015                         if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 5016                           && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
 5017                                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 5018                                 xpt_release_devq(ccb_h->path, /*count*/1,
 5019                                                  /*run_queue*/FALSE);
 5020                         }
 5021 
 5022                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 5023                          && (--dev->tag_delay_count == 0))
 5024                                 xpt_start_tags(ccb_h->path);
 5025                         if (!device_is_send_queued(dev))
 5026                                 xpt_schedule_dev_sendq(ccb_h->path->bus, dev);
 5027                 }
 5028 
 5029                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
 5030                         xpt_release_simq(ccb_h->path->bus->sim,
 5031                                          /*run_queue*/TRUE);
 5032                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
 5033                         runq = FALSE;
 5034                 }
 5035 
 5036                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 5037                  && (ccb_h->status & CAM_DEV_QFRZN)) {
 5038                         xpt_release_devq(ccb_h->path, /*count*/1,
 5039                                          /*run_queue*/TRUE);
 5040                         ccb_h->status &= ~CAM_DEV_QFRZN;
 5041                 } else if (runq) {
 5042                         xpt_run_dev_sendq(ccb_h->path->bus);
 5043                 }
 5044 
 5045                 /* Call the peripheral driver's callback */
 5046                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 5047         }
 5048 }
