FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c

    1 /*-
    2  * Implementation of the Common Access Method Transport (XPT) layer.
    3  *
    4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
    5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/9.0/sys/cam/cam_xpt.c 224806 2011-08-12 20:09:38Z mjacob $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/systm.h>
   36 #include <sys/types.h>
   37 #include <sys/malloc.h>
   38 #include <sys/kernel.h>
   39 #include <sys/time.h>
   40 #include <sys/conf.h>
   41 #include <sys/fcntl.h>
   42 #include <sys/interrupt.h>
   43 #include <sys/sbuf.h>
   44 #include <sys/taskqueue.h>
   45 
   46 #include <sys/lock.h>
   47 #include <sys/mutex.h>
   48 #include <sys/sysctl.h>
   49 #include <sys/kthread.h>
   50 
   51 #ifdef PC98
   52 #include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
   53 #endif
   54 
   55 #include <cam/cam.h>
   56 #include <cam/cam_ccb.h>
   57 #include <cam/cam_periph.h>
   58 #include <cam/cam_queue.h>
   59 #include <cam/cam_sim.h>
   60 #include <cam/cam_xpt.h>
   61 #include <cam/cam_xpt_sim.h>
   62 #include <cam/cam_xpt_periph.h>
   63 #include <cam/cam_xpt_internal.h>
   64 #include <cam/cam_debug.h>
   65 
   66 #include <cam/scsi/scsi_all.h>
   67 #include <cam/scsi/scsi_message.h>
   68 #include <cam/scsi/scsi_pass.h>
   69 #include <machine/stdarg.h>     /* for xpt_print below */
   70 #include "opt_cam.h"
   71 
   72 /*
   73  * This is the maximum number of high powered commands (e.g. start unit)
   74  * that can be outstanding at a particular time.
   75  */
   76 #ifndef CAM_MAX_HIGHPOWER
   77 #define CAM_MAX_HIGHPOWER  4
   78 #endif
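
Because of the #ifndef guard above, this ceiling can be overridden at compile
time.  A hedged example (the config-file spelling assumes the option is
plumbed through opt_cam.h, which this file includes; the CFLAGS form is plain
preprocessor mechanics and always works):

    options CAM_MAX_HIGHPOWER=8     # kernel configuration file
    # or, equivalently, -DCAM_MAX_HIGHPOWER=8 on the kernel CFLAGS
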
   79 
   80 /* Datastructures internal to the xpt layer */
   81 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
   82 
   83 /* Object for deferring XPT actions to a taskqueue (usage sketch below) */
   84 struct xpt_task {
   85         struct task     task;
   86         void            *data1;
   87         uintptr_t       data2;
   88 };
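
A minimal sketch of how a deferral object like this pairs with taskqueue(9);
the callback and function names here are hypothetical, but the pattern matches
how this file hands work such as xpt_finishconfig_task() to the thread
taskqueue (sys/taskqueue.h and sys/malloc.h are already included above):

    /* hypothetical deferred callback */
    static void
    xpt_example_task(void *context, int pending)
    {
            struct xpt_task *task = context;

            /* ... act on task->data1 / task->data2 ... */
            free(task, M_CAMXPT);
    }

    /* hypothetical enqueue side */
    static void
    xpt_example_defer(void *payload)
    {
            struct xpt_task *task;

            task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
            if (task == NULL)
                    return;
            TASK_INIT(&task->task, /*priority*/0, xpt_example_task, task);
            task->data1 = payload;          /* hypothetical payload */
            task->data2 = 0;
            taskqueue_enqueue(taskqueue_thread, &task->task);
    }
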
   89 
   90 typedef enum {
   91         XPT_FLAG_OPEN           = 0x01
   92 } xpt_flags;
   93 
   94 struct xpt_softc {
   95         xpt_flags               flags;
   96         u_int32_t               xpt_generation;
   97 
   98         /* number of high powered commands that can go through right now */
   99         STAILQ_HEAD(highpowerlist, ccb_hdr)     highpowerq;
  100         int                     num_highpower;
  101 
  102         /* queue for handling async rescan requests. */
  103         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
  104         int buses_to_config;
  105         int buses_config_done;
  106 
  107         /* Registered busses */
  108         TAILQ_HEAD(,cam_eb)     xpt_busses;
  109         u_int                   bus_generation;
  110 
  111         struct intr_config_hook *xpt_config_hook;
  112 
  113         int                     boot_delay;
  114         struct callout          boot_callout;
  115 
  116         struct mtx              xpt_topo_lock;
  117         struct mtx              xpt_lock;
  118 };
  119 
  120 typedef enum {
  121         DM_RET_COPY             = 0x01,
  122         DM_RET_FLAG_MASK        = 0x0f,
  123         DM_RET_NONE             = 0x00,
  124         DM_RET_STOP             = 0x10,
  125         DM_RET_DESCEND          = 0x20,
  126         DM_RET_ERROR            = 0x30,
  127         DM_RET_ACTION_MASK      = 0xf0
  128 } dev_match_ret;
  129 
  130 typedef enum {
  131         XPT_DEPTH_BUS,
  132         XPT_DEPTH_TARGET,
  133         XPT_DEPTH_DEVICE,
  134         XPT_DEPTH_PERIPH
  135 } xpt_traverse_depth;
  136 
  137 struct xpt_traverse_config {
  138         xpt_traverse_depth      depth;
  139         void                    *tr_func;
  140         void                    *tr_arg;
  141 };
  142 
  143 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
  144 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
  145 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
  146 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
  147 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
  148 
  149 /* Transport layer configuration information */
  150 static struct xpt_softc xsoftc;
  151 
  152 TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
  153 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
  154            &xsoftc.boot_delay, 0, "Bus registration wait time");
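
The tunable above is read from the loader environment; for example, to hold
bus registration for up to ten seconds at boot (the value is in milliseconds):

    # /boot/loader.conf
    kern.cam.boot_delay="10000"
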
  155 
  156 /* Queues for our software interrupt handler */
  157 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
  158 typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
  159 static cam_simq_t cam_simq;
  160 static struct mtx cam_simq_lock;
  161 
  162 /* Pointers to software interrupt handlers */
  163 static void *cambio_ih;
  164 
  165 struct cam_periph *xpt_periph;
  166 
  167 static periph_init_t xpt_periph_init;
  168 
  169 static struct periph_driver xpt_driver =
  170 {
  171         xpt_periph_init, "xpt",
  172         TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
  173         CAM_PERIPH_DRV_EARLY
  174 };
  175 
  176 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
  177 
  178 static d_open_t xptopen;
  179 static d_close_t xptclose;
  180 static d_ioctl_t xptioctl;
  181 
  182 static struct cdevsw xpt_cdevsw = {
  183         .d_version =    D_VERSION,
  184         .d_flags =      0,
  185         .d_open =       xptopen,
  186         .d_close =      xptclose,
  187         .d_ioctl =      xptioctl,
  188         .d_name =       "xpt",
  189 };
  190 
  191 /* Storage for debugging datastructures */
  192 #ifdef  CAMDEBUG
  193 struct cam_path *cam_dpath;
  194 #ifdef  CAM_DEBUG_FLAGS
  195 u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
  196 #else
  197 u_int32_t cam_dflags = CAM_DEBUG_NONE;
  198 #endif
  199 TUNABLE_INT("kern.cam.dflags", &cam_dflags);
  200 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
  201         &cam_dflags, 0, "Cam Debug Flags");
  202 u_int32_t cam_debug_delay;
  203 TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
  204 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
  205         &cam_debug_delay, 0, "Cam Debug Delay");
  206 #endif
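
On a kernel built with options CAMDEBUG, these knobs are reachable at runtime
through sysctl(8); the mask bits are the CAM_DEBUG_* values from
<cam/cam_debug.h>, and the specific values below are illustrative:

    sysctl kern.cam.dflags=0x1
    sysctl kern.cam.debug_delay=1000
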
  207 
  208 /* Our boot-time initialization hook */
  209 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
  210 
  211 static moduledata_t cam_moduledata = {
  212         "cam",
  213         cam_module_event_handler,
  214         NULL
  215 };
  216 
  217 static int      xpt_init(void *);
  218 
  219 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
  220 MODULE_VERSION(cam, 1);
  221 
  222 
  223 static void             xpt_async_bcast(struct async_list *async_head,
  224                                         u_int32_t async_code,
  225                                         struct cam_path *path,
  226                                         void *async_arg);
  227 static path_id_t xptnextfreepathid(void);
  228 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
  229 static union ccb *xpt_get_ccb(struct cam_ed *device);
  230 static void      xpt_run_dev_allocq(struct cam_eb *bus);
  231 static void      xpt_run_dev_sendq(struct cam_eb *bus);
  232 static timeout_t xpt_release_devq_timeout;
  233 static void      xpt_release_simq_timeout(void *arg) __unused;
  234 static void      xpt_release_bus(struct cam_eb *bus);
  235 static void      xpt_release_devq_device(struct cam_ed *dev, cam_rl rl,
  236                     u_int count, int run_queue);
  237 static struct cam_et*
  238                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
  239 static void      xpt_release_target(struct cam_et *target);
  240 static struct cam_eb*
  241                  xpt_find_bus(path_id_t path_id);
  242 static struct cam_et*
  243                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
  244 static struct cam_ed*
  245                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
  246 static void      xpt_config(void *arg);
  247 static xpt_devicefunc_t xptpassannouncefunc;
  248 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
  249 static void      xptpoll(struct cam_sim *sim);
  250 static void      camisr(void *);
  251 static void      camisr_runqueue(void *);
  252 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
  253                                     u_int num_patterns, struct cam_eb *bus);
  254 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
  255                                        u_int num_patterns,
  256                                        struct cam_ed *device);
  257 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
  258                                        u_int num_patterns,
  259                                        struct cam_periph *periph);
  260 static xpt_busfunc_t    xptedtbusfunc;
  261 static xpt_targetfunc_t xptedttargetfunc;
  262 static xpt_devicefunc_t xptedtdevicefunc;
  263 static xpt_periphfunc_t xptedtperiphfunc;
  264 static xpt_pdrvfunc_t   xptplistpdrvfunc;
  265 static xpt_periphfunc_t xptplistperiphfunc;
  266 static int              xptedtmatch(struct ccb_dev_match *cdm);
  267 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
  268 static int              xptbustraverse(struct cam_eb *start_bus,
  269                                        xpt_busfunc_t *tr_func, void *arg);
  270 static int              xpttargettraverse(struct cam_eb *bus,
  271                                           struct cam_et *start_target,
  272                                           xpt_targetfunc_t *tr_func, void *arg);
  273 static int              xptdevicetraverse(struct cam_et *target,
  274                                           struct cam_ed *start_device,
  275                                           xpt_devicefunc_t *tr_func, void *arg);
  276 static int              xptperiphtraverse(struct cam_ed *device,
  277                                           struct cam_periph *start_periph,
  278                                           xpt_periphfunc_t *tr_func, void *arg);
  279 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
  280                                         xpt_pdrvfunc_t *tr_func, void *arg);
  281 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
  282                                             struct cam_periph *start_periph,
  283                                             xpt_periphfunc_t *tr_func,
  284                                             void *arg);
  285 static xpt_busfunc_t    xptdefbusfunc;
  286 static xpt_targetfunc_t xptdeftargetfunc;
  287 static xpt_devicefunc_t xptdefdevicefunc;
  288 static xpt_periphfunc_t xptdefperiphfunc;
  289 static void             xpt_finishconfig_task(void *context, int pending);
  290 static void             xpt_dev_async_default(u_int32_t async_code,
  291                                               struct cam_eb *bus,
  292                                               struct cam_et *target,
  293                                               struct cam_ed *device,
  294                                               void *async_arg);
  295 static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
  296                                                  struct cam_et *target,
  297                                                  lun_id_t lun_id);
  298 static xpt_devicefunc_t xptsetasyncfunc;
  299 static xpt_busfunc_t    xptsetasyncbusfunc;
  300 static cam_status       xptregister(struct cam_periph *periph,
  301                                     void *arg);
  302 static __inline int periph_is_queued(struct cam_periph *periph);
  303 static __inline int device_is_alloc_queued(struct cam_ed *device);
  304 static __inline int device_is_send_queued(struct cam_ed *device);
  305 
  306 static __inline int
  307 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
  308 {
  309         int retval;
  310 
  311         if ((dev->drvq.entries > 0) &&
  312             (dev->ccbq.devq_openings > 0) &&
  313             (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
  314                 CAMQ_GET_PRIO(&dev->drvq))) == 0)) {
  315                 /*
  316                  * The priority of a device waiting for CCB resources
  317                  * is that of the highest priority peripheral driver
  318                  * enqueued.
  319                  */
  320                 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
  321                                           &dev->alloc_ccb_entry.pinfo,
  322                                           CAMQ_GET_PRIO(&dev->drvq));
  323         } else {
  324                 retval = 0;
  325         }
  326 
  327         return (retval);
  328 }
  329 
  330 static __inline int
  331 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
  332 {
  333         int     retval;
  334 
  335         if ((dev->ccbq.queue.entries > 0) &&
  336             (dev->ccbq.dev_openings > 0) &&
  337             (cam_ccbq_frozen_top(&dev->ccbq) == 0)) {
  338                 /*
  339                  * The priority of a device waiting for controller
  340                  * resources is that of the highest priority CCB
  341                  * enqueued.
  342                  */
  343                 retval =
  344                     xpt_schedule_dev(&bus->sim->devq->send_queue,
  345                                      &dev->send_ccb_entry.pinfo,
  346                                      CAMQ_GET_PRIO(&dev->ccbq.queue));
  347         } else {
  348                 retval = 0;
  349         }
  350         return (retval);
  351 }
  352 
  353 static __inline int
  354 periph_is_queued(struct cam_periph *periph)
  355 {
  356         return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
  357 }
  358 
  359 static __inline int
  360 device_is_alloc_queued(struct cam_ed *device)
  361 {
  362         return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  363 }
  364 
  365 static __inline int
  366 device_is_send_queued(struct cam_ed *device)
  367 {
  368         return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  369 }
  370 
  371 static void
  372 xpt_periph_init()
  373 {
  374         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
  375 }
  376 
  377 static void
  378 xptdone(struct cam_periph *periph, union ccb *done_ccb)
  379 {
  380         /* Caller will release the CCB */
  381         wakeup(&done_ccb->ccb_h.cbfcnp);
  382 }
  383 
  384 static int
  385 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
  386 {
  387 
  388         /*
  389          * Only allow read-write access.
  390          */
  391         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
  392                 return(EPERM);
  393 
  394         /*
  395          * We don't allow nonblocking access.
  396          */
  397         if ((flags & O_NONBLOCK) != 0) {
  398                 printf("%s: can't do nonblocking access\n", devtoname(dev));
  399                 return(ENODEV);
  400         }
  401 
  402         /* Mark ourselves open */
  403         mtx_lock(&xsoftc.xpt_lock);
  404         xsoftc.flags |= XPT_FLAG_OPEN;
  405         mtx_unlock(&xsoftc.xpt_lock);
  406 
  407         return(0);
  408 }
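
Seen from userland, the checks above mean the control device must be opened
read/write and without O_NONBLOCK; a minimal fragment:

    /* hedged userland fragment */
    int fd = open("/dev/xpt0", O_RDWR);   /* adding O_NONBLOCK gets ENODEV */
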
  409 
  410 static int
  411 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
  412 {
  413 
  414         /* Mark ourselves closed */
  415         mtx_lock(&xsoftc.xpt_lock);
  416         xsoftc.flags &= ~XPT_FLAG_OPEN;
  417         mtx_unlock(&xsoftc.xpt_lock);
  418 
  419         return(0);
  420 }
  421 
  422 /*
  423  * Don't automatically grab the xpt softc lock here even though this is going
  424  * through the xpt device.  The xpt device is really just a back door for
  425  * accessing other devices and SIMs, so the right thing to do is to grab
  426  * the appropriate SIM lock once the bus/SIM is located.
  427  */
  428 static int
  429 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
  430 {
  431         int error;
  432 
  433         error = 0;
  434 
  435         switch(cmd) {
  436         /*
  437          * For the transport layer CAMIOCOMMAND ioctl, we really only want
  438          * to accept CCB types that don't quite make sense to send through a
  439          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
  440          * in the CAM spec.
  441          */
  442         case CAMIOCOMMAND: {
  443                 union ccb *ccb;
  444                 union ccb *inccb;
  445                 struct cam_eb *bus;
  446 
  447                 inccb = (union ccb *)addr;
  448 
  449                 bus = xpt_find_bus(inccb->ccb_h.path_id);
  450                 if (bus == NULL)
  451                         return (EINVAL);
  452 
  453                 switch (inccb->ccb_h.func_code) {
  454                 case XPT_SCAN_BUS:
  455                 case XPT_RESET_BUS:
  456                         if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
  457                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
  458                                 xpt_release_bus(bus);
  459                                 return (EINVAL);
  460                         }
  461                         break;
  462                 case XPT_SCAN_TGT:
  463                         if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
  464                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
  465                                 xpt_release_bus(bus);
  466                                 return (EINVAL);
  467                         }
  468                         break;
  469                 default:
  470                         break;
  471                 }
  472 
  473                 switch(inccb->ccb_h.func_code) {
  474                 case XPT_SCAN_BUS:
  475                 case XPT_RESET_BUS:
  476                 case XPT_PATH_INQ:
  477                 case XPT_ENG_INQ:
  478                 case XPT_SCAN_LUN:
  479                 case XPT_SCAN_TGT:
  480 
  481                         ccb = xpt_alloc_ccb();
  482 
  483                         CAM_SIM_LOCK(bus->sim);
  484 
  485                         /*
  486                          * Create a path using the bus, target, and lun the
  487                          * user passed in.
  488                          */
  489                         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
  490                                             inccb->ccb_h.path_id,
  491                                             inccb->ccb_h.target_id,
  492                                             inccb->ccb_h.target_lun) !=
  493                                             CAM_REQ_CMP){
  494                                 error = EINVAL;
  495                                 CAM_SIM_UNLOCK(bus->sim);
  496                                 xpt_free_ccb(ccb);
  497                                 break;
  498                         }
  499                         /* Ensure all of our fields are correct */
  500                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
  501                                       inccb->ccb_h.pinfo.priority);
  502                         xpt_merge_ccb(ccb, inccb);
  503                         ccb->ccb_h.cbfcnp = xptdone;
  504                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
  505                         bcopy(ccb, inccb, sizeof(union ccb));
  506                         xpt_free_path(ccb->ccb_h.path);
  507                         xpt_free_ccb(ccb);
  508                         CAM_SIM_UNLOCK(bus->sim);
  509                         break;
  510 
  511                 case XPT_DEBUG: {
  512                         union ccb ccb;
  513 
  514                         /*
  515                          * This is an immediate CCB, so it's okay to
  516                          * allocate it on the stack.
  517                          */
  518 
  519                         CAM_SIM_LOCK(bus->sim);
  520 
  521                         /*
  522                          * Create a path using the bus, target, and lun the
  523                          * user passed in.
  524                          */
  525                         if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
  526                                             inccb->ccb_h.path_id,
  527                                             inccb->ccb_h.target_id,
  528                                             inccb->ccb_h.target_lun) !=
  529                                             CAM_REQ_CMP){
  530                                 error = EINVAL;
  531                                 CAM_SIM_UNLOCK(bus->sim);
  532                                 break;
  533                         }
  534                         /* Ensure all of our fields are correct */
  535                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
  536                                       inccb->ccb_h.pinfo.priority);
  537                         xpt_merge_ccb(&ccb, inccb);
  538                         ccb.ccb_h.cbfcnp = xptdone;
  539                         xpt_action(&ccb);
  540                         CAM_SIM_UNLOCK(bus->sim);
  541                         bcopy(&ccb, inccb, sizeof(union ccb));
  542                         xpt_free_path(ccb.ccb_h.path);
  543                         break;
  544 
  545                 }
  546                 case XPT_DEV_MATCH: {
  547                         struct cam_periph_map_info mapinfo;
  548                         struct cam_path *old_path;
  549 
  550                         /*
  551                          * We can't deal with physical addresses for this
  552                          * type of transaction.
  553                          */
  554                         if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
  555                                 error = EINVAL;
  556                                 break;
  557                         }
  558 
  559                         /*
  560                          * Save this in case the caller had it set to
  561                          * something in particular.
  562                          */
  563                         old_path = inccb->ccb_h.path;
  564 
  565                         /*
  566                          * We really don't need a path for the matching
  567                          * code.  The path is needed because of the
  568                          * debugging statements in xpt_action().  They
  569                          * assume that the CCB has a valid path.
  570                          */
  571                         inccb->ccb_h.path = xpt_periph->path;
  572 
  573                         bzero(&mapinfo, sizeof(mapinfo));
  574 
  575                         /*
  576                          * Map the pattern and match buffers into kernel
  577                          * virtual address space.
  578                          */
  579                         error = cam_periph_mapmem(inccb, &mapinfo);
  580 
  581                         if (error) {
  582                                 inccb->ccb_h.path = old_path;
  583                                 break;
  584                         }
  585 
  586                         /*
  587                          * This is an immediate CCB, we can send it on directly.
  588                          */
  589                         xpt_action(inccb);
  590 
  591                         /*
  592                          * Map the buffers back into user space.
  593                          */
  594                         cam_periph_unmapmem(inccb, &mapinfo);
  595 
  596                         inccb->ccb_h.path = old_path;
  597 
  598                         error = 0;
  599                         break;
  600                 }
  601                 default:
  602                         error = ENOTSUP;
  603                         break;
  604                 }
  605                 xpt_release_bus(bus);
  606                 break;
  607         }
  608         /*
  609          * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
  610          * with the peripheral driver name and unit name filled in.  The other
  611          * fields don't really matter as input.  The passthrough driver name
  612          * ("pass") and unit number are passed back in the ccb.  The current
  613          * device generation number, the index into the device peripheral
  614          * driver list, and the status are also passed back.  Note that
  615          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
  616          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
  617          * (or rather should be) impossible for the device peripheral driver
  618          * list to change since we look at the whole thing in one pass, and
  619          * we do it with lock protection.
  620          * (A userland usage sketch follows this function.)
  621          */
  622         case CAMGETPASSTHRU: {
  623                 union ccb *ccb;
  624                 struct cam_periph *periph;
  625                 struct periph_driver **p_drv;
  626                 char   *name;
  627                 u_int unit;
  628                 u_int cur_generation;
  629                 int base_periph_found;
  630                 int splbreaknum;
  631 
  632                 ccb = (union ccb *)addr;
  633                 unit = ccb->cgdl.unit_number;
  634                 name = ccb->cgdl.periph_name;
  635                 /*
  636                  * Every 100 devices, we want to drop our lock protection to
  637                  * give the software interrupt handler a chance to run.
  638                  * Most systems won't run into this check, but this should
  639                  * avoid starvation in the software interrupt handler in
  640                  * large systems.
  641                  */
  642                 splbreaknum = 100;
  643 
  644                 ccb = (union ccb *)addr;
  645 
  646                 base_periph_found = 0;
  647 
  648                 /*
  649                  * Sanity check -- make sure we don't get a null peripheral
  650                  * driver name.
  651                  */
  652                 if (*ccb->cgdl.periph_name == '\0') {
  653                         error = EINVAL;
  654                         break;
  655                 }
  656 
  657                 /* Keep the list from changing while we traverse it */
  658                 mtx_lock(&xsoftc.xpt_topo_lock);
  659 ptstartover:
  660                 cur_generation = xsoftc.xpt_generation;
  661 
  662                 /* first find our driver in the list of drivers */
  663                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
  664                         if (strcmp((*p_drv)->driver_name, name) == 0)
  665                                 break;
  666 
  667                 if (*p_drv == NULL) {
  668                         mtx_unlock(&xsoftc.xpt_topo_lock);
  669                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  670                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  671                         *ccb->cgdl.periph_name = '\0';
  672                         ccb->cgdl.unit_number = 0;
  673                         error = ENOENT;
  674                         break;
  675                 }
  676 
  677                 /*
  678                  * Run through every peripheral instance of this driver
  679                  * and check to see whether it matches the unit passed
  680                  * in by the user.  If it does, get out of the loops and
  681                  * find the passthrough driver associated with that
  682                  * peripheral driver.
  683                  */
  684                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
  685                      periph = TAILQ_NEXT(periph, unit_links)) {
  686 
  687                         if (periph->unit_number == unit) {
  688                                 break;
  689                         } else if (--splbreaknum == 0) {
  690                                 mtx_unlock(&xsoftc.xpt_topo_lock);
  691                                 mtx_lock(&xsoftc.xpt_topo_lock);
  692                                 splbreaknum = 100;
  693                                 if (cur_generation != xsoftc.xpt_generation)
  694                                        goto ptstartover;
  695                         }
  696                 }
  697                 /*
  698                  * If we found the peripheral driver that the user passed
  699                  * in, go through all of the peripheral drivers for that
  700                  * particular device and look for a passthrough driver.
  701                  */
  702                 if (periph != NULL) {
  703                         struct cam_ed *device;
  704                         int i;
  705 
  706                         base_periph_found = 1;
  707                         device = periph->path->device;
  708                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
  709                              periph != NULL;
  710                              periph = SLIST_NEXT(periph, periph_links), i++) {
  711                                 /*
  712                                  * Check to see whether we have a
  713                                  * passthrough device or not.
  714                                  */
  715                                 if (strcmp(periph->periph_name, "pass") == 0) {
  716                                         /*
  717                                          * Fill in the getdevlist fields.
  718                                          */
  719                                         strcpy(ccb->cgdl.periph_name,
  720                                                periph->periph_name);
  721                                         ccb->cgdl.unit_number =
  722                                                 periph->unit_number;
  723                                         if (SLIST_NEXT(periph, periph_links))
  724                                                 ccb->cgdl.status =
  725                                                         CAM_GDEVLIST_MORE_DEVS;
  726                                         else
  727                                                 ccb->cgdl.status =
  728                                                        CAM_GDEVLIST_LAST_DEVICE;
  729                                         ccb->cgdl.generation =
  730                                                 device->generation;
  731                                         ccb->cgdl.index = i;
  732                                         /*
  733                                          * Fill in some CCB header fields
  734                                          * that the user may want.
  735                                          */
  736                                         ccb->ccb_h.path_id =
  737                                                 periph->path->bus->path_id;
  738                                         ccb->ccb_h.target_id =
  739                                                 periph->path->target->target_id;
  740                                         ccb->ccb_h.target_lun =
  741                                                 periph->path->device->lun_id;
  742                                         ccb->ccb_h.status = CAM_REQ_CMP;
  743                                         break;
  744                                 }
  745                         }
  746                 }
  747 
  748                 /*
  749                  * If the periph is null here, one of two things has
  750                  * happened.  The first possibility is that we couldn't
  751                  * find the unit number of the particular peripheral driver
  752                  * that the user is asking about.  e.g. the user asks for
  753                  * the passthrough driver for "da11".  We find the list of
  754                  * "da" peripherals all right, but there is no unit 11.
  755                  * The other possibility is that we went through the list
  756                  * of peripheral drivers attached to the device structure,
  757                  * but didn't find one with the name "pass".  Either way,
  758                  * we return ENOENT, since we couldn't find something.
  759                  */
  760                 if (periph == NULL) {
  761                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  762                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  763                         *ccb->cgdl.periph_name = '\0';
  764                         ccb->cgdl.unit_number = 0;
  765                         error = ENOENT;
  766                         /*
  767                          * It is unfortunate that this is even necessary,
  768                          * but there are many, many clueless users out there.
  769                          * If this is true, the user is looking for the
  770                          * passthrough driver, but doesn't have one in his
  771                          * kernel.
  772                          */
  773                         if (base_periph_found == 1) {
  774                                 printf("xptioctl: pass driver is not in the "
  775                                        "kernel\n");
  776                                 printf("xptioctl: put \"device pass\" in "
  777                                        "your kernel config file\n");
  778                         }
  779                 }
  780                 mtx_unlock(&xsoftc.xpt_topo_lock);
  781                 break;
  782                 }
  783         default:
  784                 error = ENOTTY;
  785                 break;
  786         }
  787 
  788         return(error);
  789 }
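
A hedged userland sketch of the CAMGETPASSTHRU path documented above, mapping
peripheral da0 to its pass(4) instance.  This mirrors what libcam does
internally; error handling is trimmed, and setting func_code to XPT_GDEVLIST
follows libcam's convention:

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <cam/cam.h>
    #include <cam/cam_ccb.h>

    int
    main(void)
    {
            union ccb ccb;
            int fd;

            if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
                    return (1);
            memset(&ccb, 0, sizeof(ccb));
            ccb.ccb_h.func_code = XPT_GDEVLIST;
            strlcpy(ccb.cgdl.periph_name, "da",
                sizeof(ccb.cgdl.periph_name));
            ccb.cgdl.unit_number = 0;
            if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1)
                    return (1);
            if (ccb.ccb_h.status == CAM_REQ_CMP)
                    printf("da0 -> %s%d\n", ccb.cgdl.periph_name,
                        ccb.cgdl.unit_number);
            return (0);
    }
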
  790 
  791 static int
  792 cam_module_event_handler(module_t mod, int what, void *arg)
  793 {
  794         int error;
  795 
  796         switch (what) {
  797         case MOD_LOAD:
  798                 if ((error = xpt_init(NULL)) != 0)
  799                         return (error);
  800                 break;
  801         case MOD_UNLOAD:
  802                 return EBUSY;
  803         default:
  804                 return EOPNOTSUPP;
  805         }
  806 
  807         return 0;
  808 }
  809 
  810 static void
  811 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
  812 {
  813 
  814         if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
  815                 xpt_free_path(done_ccb->ccb_h.path);
  816                 xpt_free_ccb(done_ccb);
  817         } else {
  818                 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
  819                 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
  820         }
  821         xpt_release_boot();
  822 }
  823 
  824 /* thread to handle bus rescans */
  825 static void
  826 xpt_scanner_thread(void *dummy)
  827 {
  828         union ccb       *ccb;
  829         struct cam_sim  *sim;
  830 
  831         xpt_lock_buses();
  832         for (;;) {
  833                 if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
  834                         msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
  835                                "ccb_scanq", 0);
  836                 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
  837                         TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  838                         xpt_unlock_buses();
  839 
  840                         sim = ccb->ccb_h.path->bus->sim;
  841                         CAM_SIM_LOCK(sim);
  842                         xpt_action(ccb);
  843                         CAM_SIM_UNLOCK(sim);
  844 
  845                         xpt_lock_buses();
  846                 }
  847         }
  848 }
  849 
  850 void
  851 xpt_rescan(union ccb *ccb)
  852 {
  853         struct ccb_hdr *hdr;
  854 
  855         /* Prepare request */
  856         if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
  857             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
  858                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
  859         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
  860             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
  861                 ccb->ccb_h.func_code = XPT_SCAN_TGT;
  862         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
  863             ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
  864                 ccb->ccb_h.func_code = XPT_SCAN_LUN;
  865         else {
  866                 xpt_print(ccb->ccb_h.path, "illegal scan path\n");
  867                 xpt_free_path(ccb->ccb_h.path);
  868                 xpt_free_ccb(ccb);
  869                 return;
  870         }
  871         ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
  872         ccb->ccb_h.cbfcnp = xpt_rescan_done;
  873         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
  874         /* Don't make duplicate entries for the same paths. */
  875         xpt_lock_buses();
  876         if (ccb->ccb_h.ppriv_ptr1 == NULL) {
  877                 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
  878                         if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
  879                                 wakeup(&xsoftc.ccb_scanq);
  880                                 xpt_unlock_buses();
  881                                 xpt_print(ccb->ccb_h.path, "rescan already queued\n");
  882                                 xpt_free_path(ccb->ccb_h.path);
  883                                 xpt_free_ccb(ccb);
  884                                 return;
  885                         }
  886                 }
  887         }
  888         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  889         xsoftc.buses_to_config++;
  890         wakeup(&xsoftc.ccb_scanq);
  891         xpt_unlock_buses();
  892 }
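
In-kernel callers (for example a SIM reacting to a hot-plug async event) hand
a preconstructed CCB to xpt_rescan() above; from userland the equivalent scans
are typically requested with camcontrol(8), which submits XPT_SCAN_* CCBs
through the CAMIOCOMMAND ioctl shown earlier:

    camcontrol rescan all       # rescan every bus
    camcontrol rescan 0:1:0     # rescan one bus:target:lun
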
  893 
  894 /* Functions accessed by the peripheral drivers */
  895 static int
  896 xpt_init(void *dummy)
  897 {
  898         struct cam_sim *xpt_sim;
  899         struct cam_path *path;
  900         struct cam_devq *devq;
  901         cam_status status;
  902 
  903         TAILQ_INIT(&xsoftc.xpt_busses);
  904         TAILQ_INIT(&cam_simq);
  905         TAILQ_INIT(&xsoftc.ccb_scanq);
  906         STAILQ_INIT(&xsoftc.highpowerq);
  907         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
  908 
  909         mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
  910         mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
  911         mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
  912 
  913         /*
  914          * The xpt layer is, itself, the equivalent of a SIM.
  915          * Allow 16 ccbs in the ccb pool for it.  This should
  916          * give decent parallelism when we probe busses and
  917          * perform other XPT functions.
  918          */
  919         devq = cam_simq_alloc(16);
  920         xpt_sim = cam_sim_alloc(xptaction,
  921                                 xptpoll,
  922                                 "xpt",
  923                                 /*softc*/NULL,
  924                                 /*unit*/0,
  925                                 /*mtx*/&xsoftc.xpt_lock,
  926                                 /*max_dev_transactions*/0,
  927                                 /*max_tagged_dev_transactions*/0,
  928                                 devq);
  929         if (xpt_sim == NULL)
  930                 return (ENOMEM);
  931 
  932         mtx_lock(&xsoftc.xpt_lock);
  933         if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
  934                 mtx_unlock(&xsoftc.xpt_lock);
  935                 printf("xpt_init: xpt_bus_register failed with status %#x,"
  936                        " failing attach\n", status);
  937                 return (EINVAL);
  938         }
  939 
  940         /*
  941          * Looking at the XPT from the SIM layer, the XPT is
  942          * the equivalent of a peripheral driver.  Allocate
  943          * a peripheral driver entry for us.
  944          */
  945         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
  946                                       CAM_TARGET_WILDCARD,
  947                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
  948                 mtx_unlock(&xsoftc.xpt_lock);
  949                 printf("xpt_init: xpt_create_path failed with status %#x,"
  950                        " failing attach\n", status);
  951                 return (EINVAL);
  952         }
  953 
  954         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
  955                          path, NULL, 0, xpt_sim);
  956         xpt_free_path(path);
  957         mtx_unlock(&xsoftc.xpt_lock);
  958         /* Install our software interrupt handlers */
  959         swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
  960         /*
  961          * Register a callback for when interrupts are enabled.
  962          */
  963         xsoftc.xpt_config_hook =
  964             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
  965                                               M_CAMXPT, M_NOWAIT | M_ZERO);
  966         if (xsoftc.xpt_config_hook == NULL) {
  967                 printf("xpt_init: Cannot malloc config hook "
  968                        "- failing attach\n");
  969                 return (ENOMEM);
  970         }
  971         xsoftc.xpt_config_hook->ich_func = xpt_config;
  972         if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
  973                 free (xsoftc.xpt_config_hook, M_CAMXPT);
  974                 printf("xpt_init: config_intrhook_establish failed "
  975                        "- failing attach\n");
  976         }
  977 
  978         return (0);
  979 }
  980 
  981 static cam_status
  982 xptregister(struct cam_periph *periph, void *arg)
  983 {
  984         struct cam_sim *xpt_sim;
  985 
  986         if (periph == NULL) {
  987                 printf("xptregister: periph was NULL!!\n");
  988                 return(CAM_REQ_CMP_ERR);
  989         }
  990 
  991         xpt_sim = (struct cam_sim *)arg;
  992         xpt_sim->softc = periph;
  993         xpt_periph = periph;
  994         periph->softc = NULL;
  995 
  996         return(CAM_REQ_CMP);
  997 }
  998 
  999 int32_t
 1000 xpt_add_periph(struct cam_periph *periph)
 1001 {
 1002         struct cam_ed *device;
 1003         int32_t  status;
 1004         struct periph_list *periph_head;
 1005 
 1006         mtx_assert(periph->sim->mtx, MA_OWNED);
 1007 
 1008         device = periph->path->device;
 1009 
 1010         periph_head = &device->periphs;
 1011 
 1012         status = CAM_REQ_CMP;
 1013 
 1014         if (device != NULL) {
 1015                 /*
 1016                  * Make room for this peripheral
 1017                  * so it will fit in the queue
 1018                  * when it's scheduled to run
 1019                  */
 1020                 status = camq_resize(&device->drvq,
 1021                                      device->drvq.array_size + 1);
 1022 
 1023                 device->generation++;
 1024 
 1025                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
 1026         }
 1027 
 1028         mtx_lock(&xsoftc.xpt_topo_lock);
 1029         xsoftc.xpt_generation++;
 1030         mtx_unlock(&xsoftc.xpt_topo_lock);
 1031 
 1032         return (status);
 1033 }
 1034 
 1035 void
 1036 xpt_remove_periph(struct cam_periph *periph)
 1037 {
 1038         struct cam_ed *device;
 1039 
 1040         mtx_assert(periph->sim->mtx, MA_OWNED);
 1041 
 1042         device = periph->path->device;
 1043 
 1044         if (device != NULL) {
 1045                 struct periph_list *periph_head;
 1046 
 1047                 periph_head = &device->periphs;
 1048 
 1049                 /* Release the slot for this peripheral */
 1050                 camq_resize(&device->drvq, device->drvq.array_size - 1);
 1051 
 1052                 device->generation++;
 1053 
 1054                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
 1055         }
 1056 
 1057         mtx_lock(&xsoftc.xpt_topo_lock);
 1058         xsoftc.xpt_generation++;
 1059         mtx_unlock(&xsoftc.xpt_topo_lock);
 1060 }
 1061 
 1062 
 1063 void
 1064 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1065 {
 1066         struct  cam_path *path = periph->path;
 1067 
 1068         mtx_assert(periph->sim->mtx, MA_OWNED);
 1069 
 1070         printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n",
 1071                periph->periph_name, periph->unit_number,
 1072                path->bus->sim->sim_name,
 1073                path->bus->sim->unit_number,
 1074                path->bus->sim->bus_id,
 1075                path->bus->path_id,
 1076                path->target->target_id,
 1077                path->device->lun_id);
 1078         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1079         if (path->device->protocol == PROTO_SCSI)
 1080                 scsi_print_inquiry(&path->device->inq_data);
 1081         else if (path->device->protocol == PROTO_ATA ||
 1082             path->device->protocol == PROTO_SATAPM)
 1083                 ata_print_ident(&path->device->ident_data);
 1084         else
 1085                 printf("Unknown protocol device\n");
 1086         if (bootverbose && path->device->serial_num_len > 0) {
 1087                 /* Don't wrap the screen  - print only the first 60 chars */
 1088                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1089                        periph->unit_number, path->device->serial_num);
 1090         }
 1091         /* Announce transport details. */
 1092         (*(path->bus->xport->announce))(periph);
 1093         /* Announce command queueing. */
 1094         if (path->device->inq_flags & SID_CmdQue
 1095          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1096                 printf("%s%d: Command Queueing enabled\n",
 1097                        periph->periph_name, periph->unit_number);
 1098         }
 1099         /* Announce caller's details if they were passed in. */
 1100         if (announce_string != NULL)
 1101                 printf("%s%d: %s\n", periph->periph_name,
 1102                        periph->unit_number, announce_string);
 1103 }
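
For reference, the format strings above yield boot messages of this shape
(the device strings are illustrative; the inquiry line comes from
scsi_print_inquiry()):

    da0 at ahc0 bus 0 scbus0 target 0 lun 0
    da0: <SEAGATE ST39102LW 0006> Fixed Direct Access SCSI-2 device
    da0: Command Queueing enabled
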
 1104 
 1105 int
 1106 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
 1107 {
 1108         int ret = -1;
 1109         struct ccb_dev_advinfo cdai;
 1110 
 1111         memset(&cdai, 0, sizeof(cdai));
 1112         xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 1113         cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 1114         cdai.bufsiz = len;
 1115 
 1116         if (!strcmp(attr, "GEOM::ident"))
 1117                 cdai.buftype = CDAI_TYPE_SERIAL_NUM;
 1118         else if (!strcmp(attr, "GEOM::physpath"))
 1119                 cdai.buftype = CDAI_TYPE_PHYS_PATH;
 1120         else
 1121                 goto out;
 1122 
 1123         cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
 1124         if (cdai.buf == NULL) {
 1125                 ret = ENOMEM;
 1126                 goto out;
 1127         }
 1128         xpt_action((union ccb *)&cdai); /* can only be synchronous */
 1129         if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
 1130                 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
 1131         if (cdai.provsiz == 0)
 1132                 goto out;
 1133         ret = 0;
 1134         if (strlcpy(buf, cdai.buf, len) >= len)
 1135                 ret = EFAULT;
 1136 
 1137 out:
 1138         if (cdai.buf != NULL)
 1139                 free(cdai.buf, M_CAMXPT);
 1140         return ret;
 1141 }
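
A hedged in-kernel usage sketch, as a peripheral driver might call it with
its SIM lock held; the buffer size and the calling periph are assumptions,
but "GEOM::ident" is one of the two attributes handled above:

    char ident[256];            /* hypothetical buffer */

    if (xpt_getattr(ident, sizeof(ident), "GEOM::ident",
        periph->path) == 0)
            printf("disk ident: %s\n", ident);
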
 1142 
 1143 static dev_match_ret
 1144 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1145             struct cam_eb *bus)
 1146 {
 1147         dev_match_ret retval;
 1148         int i;
 1149 
 1150         retval = DM_RET_NONE;
 1151 
 1152         /*
 1153          * If we aren't given something to match against, that's an error.
 1154          */
 1155         if (bus == NULL)
 1156                 return(DM_RET_ERROR);
 1157 
 1158         /*
 1159          * If there are no match entries, then this bus matches no
 1160          * matter what.
 1161          */
 1162         if ((patterns == NULL) || (num_patterns == 0))
 1163                 return(DM_RET_DESCEND | DM_RET_COPY);
 1164 
 1165         for (i = 0; i < num_patterns; i++) {
 1166                 struct bus_match_pattern *cur_pattern;
 1167 
 1168                 /*
 1169                  * If the pattern in question isn't for a bus node, we
 1170                  * aren't interested.  However, we do indicate to the
 1171                  * calling routine that we should continue descending the
 1172                  * tree, since the user wants to match against lower-level
 1173                  * EDT elements.
 1174                  */
 1175                 if (patterns[i].type != DEV_MATCH_BUS) {
 1176                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1177                                 retval |= DM_RET_DESCEND;
 1178                         continue;
 1179                 }
 1180 
 1181                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1182 
 1183                 /*
 1184                  * If they want to match any bus node, we give them any
 1185                  * device node.
 1186                  */
 1187                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1188                         /* set the copy flag */
 1189                         retval |= DM_RET_COPY;
 1190 
 1191                         /*
 1192                          * If we've already decided on an action, go ahead
 1193                          * and return.
 1194                          */
 1195                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1196                                 return(retval);
 1197                 }
 1198 
 1199                 /*
 1200                  * Not sure why someone would do this...
 1201                  */
 1202                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1203                         continue;
 1204 
 1205                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1206                  && (cur_pattern->path_id != bus->path_id))
 1207                         continue;
 1208 
 1209                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1210                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1211                         continue;
 1212 
 1213                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1214                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1215                         continue;
 1216 
 1217                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1218                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1219                              DEV_IDLEN) != 0))
 1220                         continue;
 1221 
 1222                 /*
 1223                  * If we get to this point, the user definitely wants
 1224                  * information on this bus.  So tell the caller to copy the
 1225                  * data out.
 1226                  */
 1227                 retval |= DM_RET_COPY;
 1228 
 1229                 /*
 1230                  * If the return action has been set to descend, then we
 1231                  * know that we've already seen a non-bus matching
 1232                  * expression, therefore we need to further descend the tree.
 1233                  * This won't change by continuing around the loop, so we
 1234                  * go ahead and return.  If we haven't seen a non-bus
 1235                  * matching expression, we keep going around the loop until
 1236                  * we exhaust the matching expressions.  We'll set the stop
 1237                  * flag once we fall out of the loop.
 1238                  */
 1239                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1240                         return(retval);
 1241         }
 1242 
 1243         /*
 1244          * If the return action hasn't been set to descend yet, that means
 1245          * we haven't seen anything other than bus matching patterns.  So
 1246          * tell the caller to stop descending the tree -- the user doesn't
 1247          * want to match against lower level tree elements.
 1248          */
 1249         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1250                 retval |= DM_RET_STOP;
 1251 
 1252         return(retval);
 1253 }
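
A hedged sketch of a bus pattern this matcher would accept, as a userland
caller might build it for an XPT_DEV_MATCH request (the SIM name "ahc" is
illustrative):

    struct dev_match_pattern pat;

    memset(&pat, 0, sizeof(pat));
    pat.type = DEV_MATCH_BUS;
    pat.pattern.bus_pattern.flags = BUS_MATCH_NAME;
    strlcpy(pat.pattern.bus_pattern.dev_name, "ahc",
        sizeof(pat.pattern.bus_pattern.dev_name));
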
 1254 
 1255 static dev_match_ret
 1256 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1257                struct cam_ed *device)
 1258 {
 1259         dev_match_ret retval;
 1260         int i;
 1261 
 1262         retval = DM_RET_NONE;
 1263 
 1264         /*
 1265          * If we aren't given something to match against, that's an error.
 1266          */
 1267         if (device == NULL)
 1268                 return(DM_RET_ERROR);
 1269 
 1270         /*
 1271          * If there are no match entries, then this device matches no
 1272          * matter what.
 1273          */
 1274         if ((patterns == NULL) || (num_patterns == 0))
 1275                 return(DM_RET_DESCEND | DM_RET_COPY);
 1276 
 1277         for (i = 0; i < num_patterns; i++) {
 1278                 struct device_match_pattern *cur_pattern;
 1279                 struct scsi_vpd_device_id *device_id_page;
 1280 
 1281                 /*
 1282                  * If the pattern in question isn't for a device node, we
 1283                  * aren't interested.
 1284                  */
 1285                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1286                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1287                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1288                                 retval |= DM_RET_DESCEND;
 1289                         continue;
 1290                 }
 1291 
 1292                 cur_pattern = &patterns[i].pattern.device_pattern;
 1293 
 1294                 /* Error out if mutually exclusive options are specified. */ 
 1295                 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
 1296                  == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
 1297                         return(DM_RET_ERROR);
 1298 
 1299                 /*
 1300                  * If they want to match any device node, we give them any
 1301                  * device node.
 1302                  */
 1303                 if (cur_pattern->flags == DEV_MATCH_ANY)
 1304                         goto copy_dev_node;
 1305 
 1306                 /*
 1307                  * A DEV_MATCH_NONE pattern can never match, so skip it.
 1308                  */
 1309                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1310                         continue;
 1311 
 1312                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1313                  && (cur_pattern->path_id != device->target->bus->path_id))
 1314                         continue;
 1315 
 1316                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1317                  && (cur_pattern->target_id != device->target->target_id))
 1318                         continue;
 1319 
 1320                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1321                  && (cur_pattern->target_lun != device->lun_id))
 1322                         continue;
 1323 
 1324                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1325                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1326                                     (caddr_t)&cur_pattern->data.inq_pat,
 1327                                     1, sizeof(cur_pattern->data.inq_pat),
 1328                                     scsi_static_inquiry_match) == NULL))
 1329                         continue;
 1330 
 1331                 device_id_page = (struct scsi_vpd_device_id *)device->device_id;
 1332                 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
 1333                  && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
 1334                   || scsi_devid_match((uint8_t *)device_id_page->desc_list,
 1335                                       device->device_id_len
 1336                                     - SVPD_DEVICE_ID_HDR_LEN,
 1337                                       cur_pattern->data.devid_pat.id,
 1338                                       cur_pattern->data.devid_pat.id_len) != 0))
 1339                         continue;
 1340 
 1341 copy_dev_node:
 1342                 /*
 1343                  * If we get to this point, the user definitely wants
 1344                  * information on this device.  So tell the caller to copy
 1345                  * the data out.
 1346                  */
 1347                 retval |= DM_RET_COPY;
 1348 
 1349                 /*
 1350                  * If the return action has been set to descend, then we
 1351                  * know that we've already seen a peripheral matching
 1352                  * expression, therefore we need to further descend the tree.
 1353                  * This won't change by continuing around the loop, so we
 1354                  * go ahead and return.  If we haven't seen a peripheral
 1355                  * matching expression, we keep going around the loop until
 1356                  * we exhaust the matching expressions.  We'll set the stop
 1357                  * flag once we fall out of the loop.
 1358                  */
 1359                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1360                         return(retval);
 1361         }
 1362 
 1363         /*
 1364          * If the return action hasn't been set to descend yet, that means
 1365          * we haven't seen any peripheral matching patterns.  So tell the
 1366          * caller to stop descending the tree -- the user doesn't want to
 1367          * match against lower level tree elements.
 1368          */
 1369         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1370                 retval |= DM_RET_STOP;
 1371 
 1372         return(retval);
 1373 }
 1374 
 1375 /*
 1376  * Match a single peripheral against any number of match patterns.
 1377  */
 1378 static dev_match_ret
 1379 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1380                struct cam_periph *periph)
 1381 {
 1382         dev_match_ret retval;
 1383         int i;
 1384 
 1385         /*
 1386          * If we aren't given something to match against, that's an error.
 1387          */
 1388         if (periph == NULL)
 1389                 return(DM_RET_ERROR);
 1390 
 1391         /*
 1392          * If there are no match entries, then this peripheral matches no
 1393          * matter what.
 1394          */
 1395         if ((patterns == NULL) || (num_patterns == 0))
 1396                 return(DM_RET_STOP | DM_RET_COPY);
 1397 
 1398         /*
 1399          * There aren't any nodes below a peripheral node, so there's no
 1400          * reason to descend the tree any further.
 1401          */
 1402         retval = DM_RET_STOP;
 1403 
 1404         for (i = 0; i < num_patterns; i++) {
 1405                 struct periph_match_pattern *cur_pattern;
 1406 
 1407                 /*
 1408                  * If the pattern in question isn't for a peripheral, we
 1409                  * aren't interested.
 1410                  */
 1411                 if (patterns[i].type != DEV_MATCH_PERIPH)
 1412                         continue;
 1413 
 1414                 cur_pattern = &patterns[i].pattern.periph_pattern;
 1415 
 1416                 /*
 1417                  * If they want to match on anything, then we will do so.
 1418                  */
 1419                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 1420                         /* set the copy flag */
 1421                         retval |= DM_RET_COPY;
 1422 
 1423                         /*
 1424                          * We've already set the return action to stop,
 1425                          * since there are no nodes below peripherals in
 1426                          * the tree.
 1427                          */
 1428                         return(retval);
 1429                 }
 1430 
 1431                 /*
 1432                  * A PERIPH_MATCH_NONE pattern can never match, so skip it.
 1433                  */
 1434                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 1435                         continue;
 1436 
 1437                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 1438                  && (cur_pattern->path_id != periph->path->bus->path_id))
 1439                         continue;
 1440 
 1441                 /*
 1442                  * For the target and lun id's, we have to make sure the
 1443                  * target and lun pointers aren't NULL.  The xpt peripheral
 1444                  * has a wildcard target and device.
 1445                  */
 1446                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 1447                  && ((periph->path->target == NULL)
 1448                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 1449                         continue;
 1450 
 1451                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 1452                  && ((periph->path->device == NULL)
 1453                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 1454                         continue;
 1455 
 1456                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 1457                  && (cur_pattern->unit_number != periph->unit_number))
 1458                         continue;
 1459 
 1460                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 1461                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 1462                              DEV_IDLEN) != 0))
 1463                         continue;
 1464 
 1465                 /*
 1466                  * If we get to this point, the user definitely wants
 1467                  * information on this peripheral.  So tell the caller to
 1468                  * copy the data out.
 1469                  */
 1470                 retval |= DM_RET_COPY;
 1471 
 1472                 /*
 1473                  * The return action has already been set to stop, since
 1474                  * peripherals don't have any nodes below them in the EDT.
 1475                  */
 1476                 return(retval);
 1477         }
 1478 
 1479         /*
 1480          * If we get to this point, the peripheral that was passed in
 1481          * doesn't match any of the patterns.
 1482          */
 1483         return(retval);
 1484 }
 1485 
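/*
 * For illustration, a single pattern that xptperiphmatch() would accept
 * for every "da" peripheral could be built as below.  This is only a
 * sketch; in practice the pattern array arrives from userland inside an
 * XPT_DEV_MATCH CCB (see xptedtmatch() and xptperiphlistmatch()).
 *
 *	struct dev_match_pattern pat;
 *
 *	bzero(&pat, sizeof(pat));
 *	pat.type = DEV_MATCH_PERIPH;
 *	pat.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *	strlcpy(pat.pattern.periph_pattern.periph_name, "da", DEV_IDLEN);
 *
 * With this pattern, xptperiphmatch(&pat, 1, periph) returns DM_RET_STOP
 * for a non-matching peripheral and (DM_RET_STOP | DM_RET_COPY) for a
 * match, since nothing lies below peripherals in the EDT.
 */
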
 1486 static int
 1487 xptedtbusfunc(struct cam_eb *bus, void *arg)
 1488 {
 1489         struct ccb_dev_match *cdm;
 1490         dev_match_ret retval;
 1491 
 1492         cdm = (struct ccb_dev_match *)arg;
 1493 
 1494         /*
 1495          * If our position is for something deeper in the tree, that means
 1496          * that we've already seen this node.  So, we keep going down.
 1497          */
 1498         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1499          && (cdm->pos.cookie.bus == bus)
 1500          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1501          && (cdm->pos.cookie.target != NULL))
 1502                 retval = DM_RET_DESCEND;
 1503         else
 1504                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 1505 
 1506         /*
 1507          * If we got an error, bail out of the search.
 1508          */
 1509         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1510                 cdm->status = CAM_DEV_MATCH_ERROR;
 1511                 return(0);
 1512         }
 1513 
 1514         /*
 1515          * If the copy flag is set, copy this bus out.
 1516          */
 1517         if (retval & DM_RET_COPY) {
 1518                 u_int spaceleft, j;
 1519 
 1520                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1521                         sizeof(struct dev_match_result));
 1522 
 1523                 /*
 1524                  * If we don't have enough space to put in another
 1525                  * match result, save our position and tell the
 1526                  * user there are more devices to check.
 1527                  */
 1528                 if (spaceleft < sizeof(struct dev_match_result)) {
 1529                         bzero(&cdm->pos, sizeof(cdm->pos));
 1530                         cdm->pos.position_type =
 1531                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 1532 
 1533                         cdm->pos.cookie.bus = bus;
 1534                         cdm->pos.generations[CAM_BUS_GENERATION] =
 1535                                 xsoftc.bus_generation;
 1536                         cdm->status = CAM_DEV_MATCH_MORE;
 1537                         return(0);
 1538                 }
 1539                 j = cdm->num_matches;
 1540                 cdm->num_matches++;
 1541                 cdm->matches[j].type = DEV_MATCH_BUS;
 1542                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 1543                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 1544                 cdm->matches[j].result.bus_result.unit_number =
 1545                         bus->sim->unit_number;
 1546                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 1547                         bus->sim->sim_name, DEV_IDLEN);
 1548         }
 1549 
 1550         /*
 1551          * If the user is only interested in busses, there's no
 1552          * reason to descend to the next level in the tree.
 1553          */
 1554         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1555                 return(1);
 1556 
 1557         /*
 1558          * If there is a target generation recorded, check it to
 1559          * make sure the target list hasn't changed.
 1560          */
 1561         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1562          && (bus == cdm->pos.cookie.bus)
 1563          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1564          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 1565          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 1566              bus->generation)) {
 1567                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1568                 return(0);
 1569         }
 1570 
 1571         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1572          && (cdm->pos.cookie.bus == bus)
 1573          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1574          && (cdm->pos.cookie.target != NULL))
 1575                 return(xpttargettraverse(bus,
 1576                                         (struct cam_et *)cdm->pos.cookie.target,
 1577                                          xptedttargetfunc, arg));
 1578         else
 1579                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 1580 }
 1581 
 1582 static int
 1583 xptedttargetfunc(struct cam_et *target, void *arg)
 1584 {
 1585         struct ccb_dev_match *cdm;
 1586 
 1587         cdm = (struct ccb_dev_match *)arg;
 1588 
 1589         /*
 1590          * If there is a device list generation recorded, check it to
 1591          * make sure the device list hasn't changed.
 1592          */
 1593         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1594          && (cdm->pos.cookie.bus == target->bus)
 1595          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1596          && (cdm->pos.cookie.target == target)
 1597          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1598          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 1599          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 1600              target->generation)) {
 1601                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1602                 return(0);
 1603         }
 1604 
 1605         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1606          && (cdm->pos.cookie.bus == target->bus)
 1607          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1608          && (cdm->pos.cookie.target == target)
 1609          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1610          && (cdm->pos.cookie.device != NULL))
 1611                 return(xptdevicetraverse(target,
 1612                                         (struct cam_ed *)cdm->pos.cookie.device,
 1613                                          xptedtdevicefunc, arg));
 1614         else
 1615                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 1616 }
 1617 
 1618 static int
 1619 xptedtdevicefunc(struct cam_ed *device, void *arg)
 1620 {
 1621 
 1622         struct ccb_dev_match *cdm;
 1623         dev_match_ret retval;
 1624 
 1625         cdm = (struct ccb_dev_match *)arg;
 1626 
 1627         /*
 1628          * If our position is for something deeper in the tree, that means
 1629          * that we've already seen this node.  So, we keep going down.
 1630          */
 1631         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1632          && (cdm->pos.cookie.device == device)
 1633          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1634          && (cdm->pos.cookie.periph != NULL))
 1635                 retval = DM_RET_DESCEND;
 1636         else
 1637                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 1638                                         device);
 1639 
 1640         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1641                 cdm->status = CAM_DEV_MATCH_ERROR;
 1642                 return(0);
 1643         }
 1644 
 1645         /*
 1646          * If the copy flag is set, copy this device out.
 1647          */
 1648         if (retval & DM_RET_COPY) {
 1649                 u_int spaceleft, j;
 1650 
 1651                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1652                         sizeof(struct dev_match_result));
 1653 
 1654                 /*
 1655                  * If we don't have enough space to put in another
 1656                  * match result, save our position and tell the
 1657                  * user there are more devices to check.
 1658                  */
 1659                 if (spaceleft < sizeof(struct dev_match_result)) {
 1660                         bzero(&cdm->pos, sizeof(cdm->pos));
 1661                         cdm->pos.position_type =
 1662                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1663                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 1664 
 1665                         cdm->pos.cookie.bus = device->target->bus;
 1666                         cdm->pos.generations[CAM_BUS_GENERATION] =
 1667                                 xsoftc.bus_generation;
 1668                         cdm->pos.cookie.target = device->target;
 1669                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1670                                 device->target->bus->generation;
 1671                         cdm->pos.cookie.device = device;
 1672                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1673                                 device->target->generation;
 1674                         cdm->status = CAM_DEV_MATCH_MORE;
 1675                         return(0);
 1676                 }
 1677                 j = cdm->num_matches;
 1678                 cdm->num_matches++;
 1679                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 1680                 cdm->matches[j].result.device_result.path_id =
 1681                         device->target->bus->path_id;
 1682                 cdm->matches[j].result.device_result.target_id =
 1683                         device->target->target_id;
 1684                 cdm->matches[j].result.device_result.target_lun =
 1685                         device->lun_id;
 1686                 cdm->matches[j].result.device_result.protocol =
 1687                         device->protocol;
 1688                 bcopy(&device->inq_data,
 1689                       &cdm->matches[j].result.device_result.inq_data,
 1690                       sizeof(struct scsi_inquiry_data));
 1691                 bcopy(&device->ident_data,
 1692                       &cdm->matches[j].result.device_result.ident_data,
 1693                       sizeof(struct ata_params));
 1694 
 1695                 /* Let the user know whether this device is unconfigured */
 1696                 if (device->flags & CAM_DEV_UNCONFIGURED)
 1697                         cdm->matches[j].result.device_result.flags =
 1698                                 DEV_RESULT_UNCONFIGURED;
 1699                 else
 1700                         cdm->matches[j].result.device_result.flags =
 1701                                 DEV_RESULT_NOFLAG;
 1702         }
 1703 
 1704         /*
 1705          * If the user isn't interested in peripherals, don't descend
 1706          * the tree any further.
 1707          */
 1708         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1709                 return(1);
 1710 
 1711         /*
 1712          * If there is a peripheral list generation recorded, make sure
 1713          * it hasn't changed.
 1714          */
 1715         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1716          && (device->target->bus == cdm->pos.cookie.bus)
 1717          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1718          && (device->target == cdm->pos.cookie.target)
 1719          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1720          && (device == cdm->pos.cookie.device)
 1721          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1722          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 1723          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1724              device->generation)) {
 1725                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1726                 return(0);
 1727         }
 1728 
 1729         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1730          && (cdm->pos.cookie.bus == device->target->bus)
 1731          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1732          && (cdm->pos.cookie.target == device->target)
 1733          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1734          && (cdm->pos.cookie.device == device)
 1735          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1736          && (cdm->pos.cookie.periph != NULL))
 1737                 return(xptperiphtraverse(device,
 1738                                 (struct cam_periph *)cdm->pos.cookie.periph,
 1739                                 xptedtperiphfunc, arg));
 1740         else
 1741                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 1742 }
 1743 
 1744 static int
 1745 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 1746 {
 1747         struct ccb_dev_match *cdm;
 1748         dev_match_ret retval;
 1749 
 1750         cdm = (struct ccb_dev_match *)arg;
 1751 
 1752         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1753 
 1754         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1755                 cdm->status = CAM_DEV_MATCH_ERROR;
 1756                 return(0);
 1757         }
 1758 
 1759         /*
 1760          * If the copy flag is set, copy this peripheral out.
 1761          */
 1762         if (retval & DM_RET_COPY) {
 1763                 u_int spaceleft, j;
 1764 
 1765                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1766                         sizeof(struct dev_match_result));
 1767 
 1768                 /*
 1769                  * If we don't have enough space to put in another
 1770                  * match result, save our position and tell the
 1771                  * user there are more devices to check.
 1772                  */
 1773                 if (spaceleft < sizeof(struct dev_match_result)) {
 1774                         bzero(&cdm->pos, sizeof(cdm->pos));
 1775                         cdm->pos.position_type =
 1776                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1777                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 1778                                 CAM_DEV_POS_PERIPH;
 1779 
 1780                         cdm->pos.cookie.bus = periph->path->bus;
 1781                         cdm->pos.generations[CAM_BUS_GENERATION] =
 1782                                 xsoftc.bus_generation;
 1783                         cdm->pos.cookie.target = periph->path->target;
 1784                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1785                                 periph->path->bus->generation;
 1786                         cdm->pos.cookie.device = periph->path->device;
 1787                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1788                                 periph->path->target->generation;
 1789                         cdm->pos.cookie.periph = periph;
 1790                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1791                                 periph->path->device->generation;
 1792                         cdm->status = CAM_DEV_MATCH_MORE;
 1793                         return(0);
 1794                 }
 1795 
 1796                 j = cdm->num_matches;
 1797                 cdm->num_matches++;
 1798                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1799                 cdm->matches[j].result.periph_result.path_id =
 1800                         periph->path->bus->path_id;
 1801                 cdm->matches[j].result.periph_result.target_id =
 1802                         periph->path->target->target_id;
 1803                 cdm->matches[j].result.periph_result.target_lun =
 1804                         periph->path->device->lun_id;
 1805                 cdm->matches[j].result.periph_result.unit_number =
 1806                         periph->unit_number;
 1807                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1808                         periph->periph_name, DEV_IDLEN);
 1809         }
 1810 
 1811         return(1);
 1812 }
 1813 
 1814 static int
 1815 xptedtmatch(struct ccb_dev_match *cdm)
 1816 {
 1817         int ret;
 1818 
 1819         cdm->num_matches = 0;
 1820 
 1821         /*
 1822          * Check the bus list generation.  If it has changed, the user
 1823          * needs to reset everything and start over.
 1824          */
 1825         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1826          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 1827          && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
 1828                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1829                 return(0);
 1830         }
 1831 
 1832         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1833          && (cdm->pos.cookie.bus != NULL))
 1834                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 1835                                      xptedtbusfunc, cdm);
 1836         else
 1837                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 1838 
 1839         /*
 1840          * If we get back 0, that means that we had to stop before fully
 1841          * traversing the EDT.  It also means that one of the subroutines
 1842          * has set the status field to the proper value.  If we get back 1,
 1843          * we've fully traversed the EDT and copied out any matching entries.
 1844          */
 1845         if (ret == 1)
 1846                 cdm->status = CAM_DEV_MATCH_LAST;
 1847 
 1848         return(ret);
 1849 }
 1850 
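/*
 * The traversal above is normally driven from userland in the style of
 * camcontrol(8): open the transport layer device, point a ccb_dev_match
 * at a result buffer, and reissue the CCB while the status comes back
 * CAM_DEV_MATCH_MORE.  A rough sketch (hypothetical; error handling and
 * the ioctl plumbing of the xpt(4) device are omitted):
 *
 *	union ccb ccb;
 *	struct dev_match_result matches[64];
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.cdm.match_buf_len = sizeof(matches);
 *	ccb.cdm.matches = matches;
 *	ccb.cdm.num_patterns = 0;	(match everything)
 *	do {
 *		(issue the CCB, e.g. via the CAMIOCOMMAND ioctl on /dev/xpt0)
 *		(consume ccb.cdm.num_matches results from matches[])
 *	} while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
 *
 * Note that ccb.cdm.pos is preserved between iterations; it is what the
 * functions above use to resume the walk where the previous pass ran out
 * of buffer space.
 */
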
 1851 static int
 1852 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 1853 {
 1854         struct ccb_dev_match *cdm;
 1855 
 1856         cdm = (struct ccb_dev_match *)arg;
 1857 
 1858         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1859          && (cdm->pos.cookie.pdrv == pdrv)
 1860          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1861          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 1862          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1863              (*pdrv)->generation)) {
 1864                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1865                 return(0);
 1866         }
 1867 
 1868         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1869          && (cdm->pos.cookie.pdrv == pdrv)
 1870          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1871          && (cdm->pos.cookie.periph != NULL))
 1872                 return(xptpdperiphtraverse(pdrv,
 1873                                 (struct cam_periph *)cdm->pos.cookie.periph,
 1874                                 xptplistperiphfunc, arg));
 1875         else
 1876                 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
 1877 }
 1878 
 1879 static int
 1880 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 1881 {
 1882         struct ccb_dev_match *cdm;
 1883         dev_match_ret retval;
 1884 
 1885         cdm = (struct ccb_dev_match *)arg;
 1886 
 1887         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1888 
 1889         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1890                 cdm->status = CAM_DEV_MATCH_ERROR;
 1891                 return(0);
 1892         }
 1893 
 1894         /*
 1895          * If the copy flag is set, copy this peripheral out.
 1896          */
 1897         if (retval & DM_RET_COPY) {
 1898                 u_int spaceleft, j;
 1899 
 1900                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1901                         sizeof(struct dev_match_result));
 1902 
 1903                 /*
 1904                  * If we don't have enough space to put in another
 1905                  * match result, save our position and tell the
 1906                  * user there are more devices to check.
 1907                  */
 1908                 if (spaceleft < sizeof(struct dev_match_result)) {
 1909                         struct periph_driver **pdrv;
 1910 
 1911                         pdrv = NULL;
 1912                         bzero(&cdm->pos, sizeof(cdm->pos));
 1913                         cdm->pos.position_type =
 1914                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 1915                                 CAM_DEV_POS_PERIPH;
 1916 
 1917                         /*
 1918                          * This may look a bit nonsensical, but it is
 1919                          * actually quite logical.  There are very few
 1920                          * peripheral drivers, and bloating every peripheral
 1921                          * structure with a pointer back to its parent
 1922                          * peripheral driver linker set entry would cost
 1923                          * more in the long run than doing this quick lookup.
 1924                          */
 1925                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 1926                                 if (strcmp((*pdrv)->driver_name,
 1927                                     periph->periph_name) == 0)
 1928                                         break;
 1929                         }
 1930 
 1931                         if (*pdrv == NULL) {
 1932                                 cdm->status = CAM_DEV_MATCH_ERROR;
 1933                                 return(0);
 1934                         }
 1935 
 1936                         cdm->pos.cookie.pdrv = pdrv;
 1937                         /*
 1938                          * The periph generation slot does double duty, as
 1939                          * does the periph pointer slot.  They are used for
 1940                          * both edt and pdrv lookups and positioning.
 1941                          */
 1942                         cdm->pos.cookie.periph = periph;
 1943                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1944                                 (*pdrv)->generation;
 1945                         cdm->status = CAM_DEV_MATCH_MORE;
 1946                         return(0);
 1947                 }
 1948 
 1949                 j = cdm->num_matches;
 1950                 cdm->num_matches++;
 1951                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1952                 cdm->matches[j].result.periph_result.path_id =
 1953                         periph->path->bus->path_id;
 1954 
 1955                 /*
 1956                  * The transport layer peripheral doesn't have a target or
 1957                  * lun.
 1958                  */
 1959                 if (periph->path->target)
 1960                         cdm->matches[j].result.periph_result.target_id =
 1961                                 periph->path->target->target_id;
 1962                 else
 1963                         cdm->matches[j].result.periph_result.target_id = -1;
 1964 
 1965                 if (periph->path->device)
 1966                         cdm->matches[j].result.periph_result.target_lun =
 1967                                 periph->path->device->lun_id;
 1968                 else
 1969                         cdm->matches[j].result.periph_result.target_lun = -1;
 1970 
 1971                 cdm->matches[j].result.periph_result.unit_number =
 1972                         periph->unit_number;
 1973                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 1974                         periph->periph_name, DEV_IDLEN);
 1975         }
 1976 
 1977         return(1);
 1978 }
 1979 
 1980 static int
 1981 xptperiphlistmatch(struct ccb_dev_match *cdm)
 1982 {
 1983         int ret;
 1984 
 1985         cdm->num_matches = 0;
 1986 
 1987         /*
 1988          * At the equivalent point in the EDT traversal function, we
 1989          * check the bus list generation to make sure that no busses
 1990          * have been added or removed since the user last sent an
 1991          * XPT_DEV_MATCH CCB through.  No such check is needed here:
 1992          * peripheral driver types can't come or go at runtime, since
 1993          * they live in a linker set and therefore can't change without
 1994          * a recompile.
 1995          */
 1996 
 1997         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1998          && (cdm->pos.cookie.pdrv != NULL))
 1999                 ret = xptpdrvtraverse(
 2000                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2001                                 xptplistpdrvfunc, cdm);
 2002         else
 2003                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2004 
 2005         /*
 2006          * If we get back 0, that means that we had to stop before fully
 2007          * traversing the peripheral driver tree.  It also means that one of
 2008          * the subroutines has set the status field to the proper value.
 2009          * If we get back 1, we've fully traversed the peripheral driver
 2010          * list and copied out any matching entries.
 2011          */
 2012         if (ret == 1)
 2013                 cdm->status = CAM_DEV_MATCH_LAST;
 2014 
 2015         return(ret);
 2016 }
 2017 
 2018 static int
 2019 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2020 {
 2021         struct cam_eb *bus, *next_bus;
 2022         int retval;
 2023 
 2024         retval = 1;
 2025 
 2026         mtx_lock(&xsoftc.xpt_topo_lock);
 2027         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
 2028              bus != NULL;
 2029              bus = next_bus) {
 2030                 next_bus = TAILQ_NEXT(bus, links);
 2031 
 2032                 mtx_unlock(&xsoftc.xpt_topo_lock);
 2033                 CAM_SIM_LOCK(bus->sim);
 2034                 retval = tr_func(bus, arg);
 2035                 CAM_SIM_UNLOCK(bus->sim);
 2036                 if (retval == 0)
 2037                         return(retval);
 2038                 mtx_lock(&xsoftc.xpt_topo_lock);
 2039         }
 2040         mtx_unlock(&xsoftc.xpt_topo_lock);
 2041 
 2042         return(retval);
 2043 }
 2044 
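/*
 * Every traversal callback in this family observes the same contract:
 * it is invoked once per node (here, with the bus's SIM lock held and
 * the topology lock dropped) and returns nonzero to continue the walk
 * or zero to abort it.  A minimal, hypothetical callback that counts
 * busses would look like:
 *
 *	static int
 *	xptcountbusfunc(struct cam_eb *bus, void *arg)
 *	{
 *		int *countp = arg;
 *
 *		(*countp)++;
 *		return (1);
 *	}
 *
 * after which xptbustraverse(NULL, xptcountbusfunc, &count) visits every
 * bus currently registered with the transport layer.
 */
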
 2045 int
 2046 xpt_sim_opened(struct cam_sim *sim)
 2047 {
 2048         struct cam_eb *bus;
 2049         struct cam_et *target;
 2050         struct cam_ed *device;
 2051         struct cam_periph *periph;
 2052 
 2053         KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
 2054         mtx_assert(sim->mtx, MA_OWNED);
 2055 
 2056         mtx_lock(&xsoftc.xpt_topo_lock);
 2057         TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
 2058                 if (bus->sim != sim)
 2059                         continue;
 2060 
 2061                 TAILQ_FOREACH(target, &bus->et_entries, links) {
 2062                         TAILQ_FOREACH(device, &target->ed_entries, links) {
 2063                                 SLIST_FOREACH(periph, &device->periphs,
 2064                                     periph_links) {
 2065                                         if (periph->refcount > 0) {
 2066                                                 mtx_unlock(&xsoftc.xpt_topo_lock);
 2067                                                 return (1);
 2068                                         }
 2069                                 }
 2070                         }
 2071                 }
 2072         }
 2073 
 2074         mtx_unlock(&xsoftc.xpt_topo_lock);
 2075         return (0);
 2076 }
 2077 
 2078 static int
 2079 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2080                   xpt_targetfunc_t *tr_func, void *arg)
 2081 {
 2082         struct cam_et *target, *next_target;
 2083         int retval;
 2084 
 2085         retval = 1;
 2086         for (target = (start_target ? start_target :
 2087                        TAILQ_FIRST(&bus->et_entries));
 2088              target != NULL; target = next_target) {
 2089 
 2090                 next_target = TAILQ_NEXT(target, links);
 2091 
 2092                 retval = tr_func(target, arg);
 2093 
 2094                 if (retval == 0)
 2095                         return(retval);
 2096         }
 2097 
 2098         return(retval);
 2099 }
 2100 
 2101 static int
 2102 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2103                   xpt_devicefunc_t *tr_func, void *arg)
 2104 {
 2105         struct cam_ed *device, *next_device;
 2106         int retval;
 2107 
 2108         retval = 1;
 2109         for (device = (start_device ? start_device :
 2110                        TAILQ_FIRST(&target->ed_entries));
 2111              device != NULL;
 2112              device = next_device) {
 2113 
 2114                 next_device = TAILQ_NEXT(device, links);
 2115 
 2116                 retval = tr_func(device, arg);
 2117 
 2118                 if (retval == 0)
 2119                         return(retval);
 2120         }
 2121 
 2122         return(retval);
 2123 }
 2124 
 2125 static int
 2126 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2127                   xpt_periphfunc_t *tr_func, void *arg)
 2128 {
 2129         struct cam_periph *periph, *next_periph;
 2130         int retval;
 2131 
 2132         retval = 1;
 2133 
 2134         for (periph = (start_periph ? start_periph :
 2135                        SLIST_FIRST(&device->periphs));
 2136              periph != NULL;
 2137              periph = next_periph) {
 2138 
 2139                 next_periph = SLIST_NEXT(periph, periph_links);
 2140 
 2141                 retval = tr_func(periph, arg);
 2142                 if (retval == 0)
 2143                         return(retval);
 2144         }
 2145 
 2146         return(retval);
 2147 }
 2148 
 2149 static int
 2150 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2151                 xpt_pdrvfunc_t *tr_func, void *arg)
 2152 {
 2153         struct periph_driver **pdrv;
 2154         int retval;
 2155 
 2156         retval = 1;
 2157 
 2158         /*
 2159          * We don't traverse the peripheral driver list like we do the
 2160          * other lists, because it is a linker set, and therefore cannot be
 2161          * changed during runtime.  If the peripheral driver list is ever
 2162          * re-done to be something other than a linker set (i.e. it can
 2163          * change while the system is running), the list traversal should
 2164          * be modified to work like the other traversal functions.
 2165          */
 2166         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2167              *pdrv != NULL; pdrv++) {
 2168                 retval = tr_func(pdrv, arg);
 2169 
 2170                 if (retval == 0)
 2171                         return(retval);
 2172         }
 2173 
 2174         return(retval);
 2175 }
 2176 
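/*
 * The linker set walked above is populated at build time: each
 * peripheral driver contributes its periph_driver structure through the
 * PERIPHDRIVER_DECLARE() macro from cam_periph.h, roughly as the da(4)
 * driver does (abridged; the trailing 0 is the generation count):
 *
 *	static struct periph_driver dadriver = {
 *		dainit, "da",
 *		TAILQ_HEAD_INITIALIZER(dadriver.units), 0
 *	};
 *
 *	PERIPHDRIVER_DECLARE(da, dadriver);
 */
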
 2177 static int
 2178 xptpdperiphtraverse(struct periph_driver **pdrv,
 2179                     struct cam_periph *start_periph,
 2180                     xpt_periphfunc_t *tr_func, void *arg)
 2181 {
 2182         struct cam_periph *periph, *next_periph;
 2183         int retval;
 2184 
 2185         retval = 1;
 2186 
 2187         xpt_lock_buses();
 2188         for (periph = (start_periph ? start_periph :
 2189              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2190              periph = next_periph) {
 2191 
 2192                 next_periph = TAILQ_NEXT(periph, unit_links);
 2193 
 2194                 retval = tr_func(periph, arg);
 2195                 if (retval == 0) {
 2196                         xpt_unlock_buses();
 2197                         return(retval);
 2198                 }
 2199         }
 2200         xpt_unlock_buses();
 2201         return(retval);
 2202 }
 2203 
 2204 static int
 2205 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2206 {
 2207         struct xpt_traverse_config *tr_config;
 2208 
 2209         tr_config = (struct xpt_traverse_config *)arg;
 2210 
 2211         if (tr_config->depth == XPT_DEPTH_BUS) {
 2212                 xpt_busfunc_t *tr_func;
 2213 
 2214                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2215 
 2216                 return(tr_func(bus, tr_config->tr_arg));
 2217         } else
 2218                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2219 }
 2220 
 2221 static int
 2222 xptdeftargetfunc(struct cam_et *target, void *arg)
 2223 {
 2224         struct xpt_traverse_config *tr_config;
 2225 
 2226         tr_config = (struct xpt_traverse_config *)arg;
 2227 
 2228         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2229                 xpt_targetfunc_t *tr_func;
 2230 
 2231                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2232 
 2233                 return(tr_func(target, tr_config->tr_arg));
 2234         } else
 2235                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2236 }
 2237 
 2238 static int
 2239 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2240 {
 2241         struct xpt_traverse_config *tr_config;
 2242 
 2243         tr_config = (struct xpt_traverse_config *)arg;
 2244 
 2245         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2246                 xpt_devicefunc_t *tr_func;
 2247 
 2248                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2249 
 2250                 return(tr_func(device, tr_config->tr_arg));
 2251         } else
 2252                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2253 }
 2254 
 2255 static int
 2256 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2257 {
 2258         struct xpt_traverse_config *tr_config;
 2259         xpt_periphfunc_t *tr_func;
 2260 
 2261         tr_config = (struct xpt_traverse_config *)arg;
 2262 
 2263         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2264 
 2265         /*
 2266          * Unlike the other default functions, we don't check for depth
 2267          * here.  The peripheral driver level is the last level in the EDT,
 2268          * so if we're here, we should execute the function in question.
 2269          */
 2270         return(tr_func(periph, tr_config->tr_arg));
 2271 }
 2272 
 2273 /*
 2274  * Execute the given function for every bus in the EDT.
 2275  */
 2276 static int
 2277 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2278 {
 2279         struct xpt_traverse_config tr_config;
 2280 
 2281         tr_config.depth = XPT_DEPTH_BUS;
 2282         tr_config.tr_func = tr_func;
 2283         tr_config.tr_arg = arg;
 2284 
 2285         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2286 }
 2287 
 2288 /*
 2289  * Execute the given function for every device in the EDT.
 2290  */
 2291 static int
 2292 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2293 {
 2294         struct xpt_traverse_config tr_config;
 2295 
 2296         tr_config.depth = XPT_DEPTH_DEVICE;
 2297         tr_config.tr_func = tr_func;
 2298         tr_config.tr_arg = arg;
 2299 
 2300         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2301 }
 2302 
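/*
 * These two wrappers are how the rest of this file fans an operation out
 * across the tree.  Registering an async callback, for example, replays
 * the existing topology to the new client roughly like this (a sketch of
 * the XPT_SASYNC_CB handling; csa is the ccb_setasync being registered):
 *
 *	if (csa->event_enable & AC_FOUND_DEVICE)
 *		xpt_for_all_devices(xptsetasyncfunc, csa);
 *	if (csa->event_enable & AC_PATH_REGISTERED)
 *		xpt_for_all_busses(xptsetasyncbusfunc, csa);
 *
 * which hands every pre-existing device and bus to the callbacks defined
 * just below.
 */
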
 2303 static int
 2304 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2305 {
 2306         struct cam_path path;
 2307         struct ccb_getdev cgd;
 2308         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2309 
 2310         /*
 2311          * Don't report unconfigured devices (Wildcard devs,
 2312          * devices only for target mode, device instances
 2313          * that have been invalidated but are waiting for
 2314          * their last reference count to be released).
 2315          */
 2316         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2317                 return (1);
 2318 
 2319         xpt_compile_path(&path,
 2320                          NULL,
 2321                          device->target->bus->path_id,
 2322                          device->target->target_id,
 2323                          device->lun_id);
 2324         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2325         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2326         xpt_action((union ccb *)&cgd);
 2327         csa->callback(csa->callback_arg,
 2328                             AC_FOUND_DEVICE,
 2329                             &path, &cgd);
 2330         xpt_release_path(&path);
 2331 
 2332         return(1);
 2333 }
 2334 
 2335 static int
 2336 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2337 {
 2338         struct cam_path path;
 2339         struct ccb_pathinq cpi;
 2340         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2341 
 2342         xpt_compile_path(&path, /*periph*/NULL,
 2343                          bus->sim->path_id,
 2344                          CAM_TARGET_WILDCARD,
 2345                          CAM_LUN_WILDCARD);
 2346         xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2347         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2348         xpt_action((union ccb *)&cpi);
 2349         csa->callback(csa->callback_arg,
 2350                             AC_PATH_REGISTERED,
 2351                             &path, &cpi);
 2352         xpt_release_path(&path);
 2353 
 2354         return(1);
 2355 }
 2356 
 2357 void
 2358 xpt_action(union ccb *start_ccb)
 2359 {
 2360 
 2361         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2362 
 2363         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2364         /* Compatibility for RL-unaware code. */
 2365         if (CAM_PRIORITY_TO_RL(start_ccb->ccb_h.pinfo.priority) == 0)
 2366             start_ccb->ccb_h.pinfo.priority += CAM_PRIORITY_NORMAL - 1;
 2367         (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
 2368 }
 2369 
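/*
 * Consumers typically build a CCB on the stack and push it through
 * xpt_action() synchronously, as xptsetasyncbusfunc() above does for
 * XPT_PATH_INQ.  The same idiom from a hypothetical caller holding a
 * valid path:
 *
 *	struct ccb_pathinq cpi;
 *
 *	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	cpi.ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action((union ccb *)&cpi);
 *	if (cpi.ccb_h.status == CAM_REQ_CMP)
 *		(cpi.hba_inquiry, cpi.maxio, etc. are now valid)
 */
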
 2370 void
 2371 xpt_action_default(union ccb *start_ccb)
 2372 {
 2373 #ifdef CAMDEBUG
 2374         char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 2375 #endif
 2376         struct cam_path *path;
 2377 
 2378         path = start_ccb->ccb_h.path;
 2379         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
 2380 
 2381         switch (start_ccb->ccb_h.func_code) {
 2382         case XPT_SCSI_IO:
 2383         {
 2384                 struct cam_ed *device;
 2385 
 2386                 /*
 2387                  * For the sake of compatibility with SCSI-1
 2388                  * devices that may not understand the identify
 2389                  * message, we include lun information in the
 2390                  * second byte of all commands.  SCSI-1 specifies
 2391                  * that luns are a 3 bit value and reserves only 3
 2392                  * bits for lun information in the CDB.  Later
 2393                  * revisions of the SCSI spec allow for more than 8
 2394                  * luns, but have deprecated lun information in the
 2395                  * CDB.  So, if the lun won't fit, we must omit it.
 2396                  *
 2397                  * Also be aware that during initial probing for devices,
 2398                  * the inquiry information is unknown but initialized to 0.
 2399                  * This means that this code will be exercised while probing
 2400                  * devices with an ANSI revision greater than 2.
 2401                  */
 2402                 device = path->device;
 2403                 if (device->protocol_version <= SCSI_REV_2
 2404                  && start_ccb->ccb_h.target_lun < 8
 2405                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 2406 
 2407                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 2408                             start_ccb->ccb_h.target_lun << 5;
 2409                 }
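                /*
                 * For example, a command sent to LUN 2 of a SCSI-1 target
                 * gets (2 << 5) == 0x40 folded into bits 7-5 of CDB byte 1
                 * here, leaving the low five bits of that byte untouched.
                 */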
 2410                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 2411                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
 2412                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
 2413                                        &path->device->inq_data),
 2414                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
 2415                                           cdb_str, sizeof(cdb_str))));
 2416         }
 2417         /* FALLTHROUGH */
 2418         case XPT_TARGET_IO:
 2419         case XPT_CONT_TARGET_IO:
 2420                 start_ccb->csio.sense_resid = 0;
 2421                 start_ccb->csio.resid = 0;
 2422                 /* FALLTHROUGH */
 2423         case XPT_ATA_IO:
 2424                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) {
 2425                         start_ccb->ataio.resid = 0;
 2426                         CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. ACB: %s\n",
 2427                             ata_op_string(&start_ccb->ataio.cmd),
 2428                             ata_cmd_string(&start_ccb->ataio.cmd,
 2429                                           cdb_str, sizeof(cdb_str))));
 2430                 }
 2431                 /* FALLTHROUGH */
 2432         case XPT_RESET_DEV:
 2433         case XPT_ENG_EXEC:
 2434         case XPT_SMP_IO:
 2435         {
 2436                 int frozen;
 2437 
 2438                 frozen = cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 2439                 path->device->sim->devq->alloc_openings += frozen;
 2440                 if (frozen > 0)
 2441                         xpt_run_dev_allocq(path->bus);
 2442                 if (xpt_schedule_dev_sendq(path->bus, path->device))
 2443                         xpt_run_dev_sendq(path->bus);
 2444                 break;
 2445         }
 2446         case XPT_CALC_GEOMETRY:
 2447         {
 2448                 struct cam_sim *sim;
 2449 
 2450                 /* Filter out garbage */
 2451                 if (start_ccb->ccg.block_size == 0
 2452                  || start_ccb->ccg.volume_size == 0) {
 2453                         start_ccb->ccg.cylinders = 0;
 2454                         start_ccb->ccg.heads = 0;
 2455                         start_ccb->ccg.secs_per_track = 0;
 2456                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2457                         break;
 2458                 }
 2459 #ifdef PC98
 2460                 /*
 2461                  * In a PC-98 system, geometry translation depends on
 2462                  * the "real" device geometry obtained from mode page 4.
 2463                  * SCSI geometry translation is performed in the
 2464                  * initialization routine of the SCSI BIOS and the result
 2465                  * is stored in host memory.  If the translation is available
 2466                  * in host memory, use it.  If not, rely on the default
 2467                  * translation the device driver performs.
 2468                  */
 2469                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 2470                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2471                         break;
 2472                 }
 2473 #endif
 2474                 sim = path->bus->sim;
 2475                 (*(sim->sim_action))(sim, start_ccb);
 2476                 break;
 2477         }
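        /*
         * Most SIMs answer XPT_CALC_GEOMETRY by calling the generic
         * cam_calc_geometry() helper; its extended-translation core is
         * essentially (an abridged sketch):
         *
         *	size_mb = ccg->volume_size /
         *	    ((1024L * 1024L) / ccg->block_size);
         *	if (size_mb > 1024 && extended) {
         *		ccg->heads = 255;
         *		ccg->secs_per_track = 63;
         *	} else {
         *		ccg->heads = 64;
         *		ccg->secs_per_track = 32;
         *	}
         *	ccg->cylinders = ccg->volume_size /
         *	    (ccg->heads * ccg->secs_per_track);
         *
         * i.e. pick a heads/sectors shape based on capacity, then derive
         * the cylinder count from the total volume size.
         */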
 2478         case XPT_ABORT:
 2479         {
 2480                 union ccb* abort_ccb;
 2481 
 2482                 abort_ccb = start_ccb->cab.abort_ccb;
 2483                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 2484 
 2485                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 2486                                 struct cam_ccbq *ccbq;
 2487                                 struct cam_ed *device;
 2488 
 2489                                 device = abort_ccb->ccb_h.path->device;
 2490                                 ccbq = &device->ccbq;
 2491                                 device->sim->devq->alloc_openings -= 
 2492                                     cam_ccbq_remove_ccb(ccbq, abort_ccb);
 2493                                 abort_ccb->ccb_h.status =
 2494                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2495                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2496                                 xpt_done(abort_ccb);
 2497                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2498                                 break;
 2499                         }
 2500                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 2501                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 2502                                 /*
 2503                                  * We've caught this ccb en route to
 2504                                  * the SIM.  Flag it for abort and the
 2505                                  * SIM will do so just before starting
 2506                                  * real work on the CCB.
 2507                                  */
 2508                                 abort_ccb->ccb_h.status =
 2509                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2510                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2511                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2512                                 break;
 2513                         }
 2514                 }
 2515                 if (XPT_FC_IS_QUEUED(abort_ccb)
 2516                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 2517                         /*
 2518                          * It's already completed but waiting
 2519                          * for our SWI to get to it.
 2520                          */
 2521                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 2522                         break;
 2523                 }
 2524                 /*
 2525                  * If we weren't able to take care of the abort request
 2526                  * in the XPT, pass the request down to the SIM for processing.
 2527                  */
 2528         }
 2529         /* FALLTHROUGH */
 2530         case XPT_ACCEPT_TARGET_IO:
 2531         case XPT_EN_LUN:
 2532         case XPT_IMMED_NOTIFY:
 2533         case XPT_NOTIFY_ACK:
 2534         case XPT_RESET_BUS:
 2535         case XPT_IMMEDIATE_NOTIFY:
 2536         case XPT_NOTIFY_ACKNOWLEDGE:
 2537         case XPT_GET_SIM_KNOB:
 2538         case XPT_SET_SIM_KNOB:
 2539         {
 2540                 struct cam_sim *sim;
 2541 
 2542                 sim = path->bus->sim;
 2543                 (*(sim->sim_action))(sim, start_ccb);
 2544                 break;
 2545         }
 2546         case XPT_PATH_INQ:
 2547         {
 2548                 struct cam_sim *sim;
 2549 
 2550                 sim = path->bus->sim;
 2551                 (*(sim->sim_action))(sim, start_ccb);
 2552                 break;
 2553         }
 2554         case XPT_PATH_STATS:
 2555                 start_ccb->cpis.last_reset = path->bus->last_reset;
 2556                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2557                 break;
 2558         case XPT_GDEV_TYPE:
 2559         {
 2560                 struct cam_ed *dev;
 2561 
 2562                 dev = path->device;
 2563                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2564                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2565                 } else {
 2566                         struct ccb_getdev *cgd;
 2567 
 2568                         cgd = &start_ccb->cgd;
 2569                         cgd->protocol = dev->protocol;
 2570                         cgd->inq_data = dev->inq_data;
 2571                         cgd->ident_data = dev->ident_data;
 2572                         cgd->inq_flags = dev->inq_flags;
 2573                         cgd->ccb_h.status = CAM_REQ_CMP;
 2574                         cgd->serial_num_len = dev->serial_num_len;
 2575                         if ((dev->serial_num_len > 0)
 2576                          && (dev->serial_num != NULL))
 2577                                 bcopy(dev->serial_num, cgd->serial_num,
 2578                                       dev->serial_num_len);
 2579                 }
 2580                 break;
 2581         }
 2582         case XPT_GDEV_STATS:
 2583         {
 2584                 struct cam_ed *dev;
 2585 
 2586                 dev = path->device;
 2587                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2588                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2589                 } else {
 2590                         struct ccb_getdevstats *cgds;
 2591                         struct cam_eb *bus;
 2592                         struct cam_et *tar;
 2593 
 2594                         cgds = &start_ccb->cgds;
 2595                         bus = path->bus;
 2596                         tar = path->target;
 2597                         cgds->dev_openings = dev->ccbq.dev_openings;
 2598                         cgds->dev_active = dev->ccbq.dev_active;
 2599                         cgds->devq_openings = dev->ccbq.devq_openings;
 2600                         cgds->devq_queued = dev->ccbq.queue.entries;
 2601                         cgds->held = dev->ccbq.held;
 2602                         cgds->last_reset = tar->last_reset;
 2603                         cgds->maxtags = dev->maxtags;
 2604                         cgds->mintags = dev->mintags;
 2605                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 2606                                 cgds->last_reset = bus->last_reset;
 2607                         cgds->ccb_h.status = CAM_REQ_CMP;
 2608                 }
 2609                 break;
 2610         }
 2611         case XPT_GDEVLIST:
 2612         {
 2613                 struct cam_periph       *nperiph;
 2614                 struct periph_list      *periph_head;
 2615                 struct ccb_getdevlist   *cgdl;
 2616                 u_int                   i;
 2617                 struct cam_ed           *device;
 2618                 int                     found;
 2619 
 2620 
 2621                 found = 0;
 2622 
 2623                 /*
 2624                  * Don't want anyone mucking with our data.
 2625                  */
 2626                 device = path->device;
 2627                 periph_head = &device->periphs;
 2628                 cgdl = &start_ccb->cgdl;
 2629 
 2630                 /*
 2631                  * Check and see if the list has changed since the user
 2632                  * last requested a list member.  If so, tell them that the
 2633                  * list has changed, and therefore they need to start over
 2634                  * from the beginning.
 2635                  */
 2636                 if ((cgdl->index != 0) &&
 2637                     (cgdl->generation != device->generation)) {
 2638                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 2639                         break;
 2640                 }
 2641 
 2642                 /*
 2643                  * Traverse the list of peripherals and attempt to find
 2644                  * the requested peripheral.
 2645                  */
 2646                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 2647                      (nperiph != NULL) && (i <= cgdl->index);
 2648                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 2649                         if (i == cgdl->index) {
 2650                                 strncpy(cgdl->periph_name,
 2651                                         nperiph->periph_name,
 2652                                         DEV_IDLEN);
 2653                                 cgdl->unit_number = nperiph->unit_number;
 2654                                 found = 1;
 2655                         }
 2656                 }
 2657                 if (found == 0) {
 2658                         cgdl->status = CAM_GDEVLIST_ERROR;
 2659                         break;
 2660                 }
 2661 
 2662                 if (nperiph == NULL)
 2663                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 2664                 else
 2665                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 2666 
 2667                 cgdl->index++;
 2668                 cgdl->generation = device->generation;
 2669 
 2670                 cgdl->ccb_h.status = CAM_REQ_CMP;
 2671                 break;
 2672         }
 2673         case XPT_DEV_MATCH:
 2674         {
 2675                 dev_pos_type position_type;
 2676                 struct ccb_dev_match *cdm;
 2677 
 2678                 cdm = &start_ccb->cdm;
 2679 
 2680                 /*
 2681                  * There are two ways of getting at information in the EDT.
 2682                  * The first way is via the primary EDT tree.  It starts
 2683                  * with a list of busses, then a list of targets on a bus,
 2684                  * then devices/luns on a target, and then peripherals on a
 2685                  * device/lun.  The "other" way is by the peripheral driver
 2686                  * lists.  The peripheral driver lists are organized by
 2687                  * peripheral driver.  (obviously)  So it makes sense to
 2688                  * use the peripheral driver list if the user is looking
 2689                  * for something like "da1", or all "da" devices.  If the
 2690                  * user is looking for something on a particular bus/target
 2691                  * or lun, it's generally better to go through the EDT tree.
 2692                  */
 2693 
 2694                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 2695                         position_type = cdm->pos.position_type;
 2696                 else {
 2697                         u_int i;
 2698 
 2699                         position_type = CAM_DEV_POS_NONE;
 2700 
 2701                         for (i = 0; i < cdm->num_patterns; i++) {
 2702                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 2703                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 2704                                         position_type = CAM_DEV_POS_EDT;
 2705                                         break;
 2706                                 }
 2707                         }
 2708 
 2709                         if (cdm->num_patterns == 0)
 2710                                 position_type = CAM_DEV_POS_EDT;
 2711                         else if (position_type == CAM_DEV_POS_NONE)
 2712                                 position_type = CAM_DEV_POS_PDRV;
 2713                 }
 2714 
 2715                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
 2716                 case CAM_DEV_POS_EDT:
 2717                         xptedtmatch(cdm);
 2718                         break;
 2719                 case CAM_DEV_POS_PDRV:
 2720                         xptperiphlistmatch(cdm);
 2721                         break;
 2722                 default:
 2723                         cdm->status = CAM_DEV_MATCH_ERROR;
 2724                         break;
 2725                 }
 2726 
 2727                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 2728                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2729                 else
 2730                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2731 
 2732                 break;
 2733         }
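        /*
         * Example (sketch): userland reaches this case through the xpt(4)
         * control device, as camcontrol(8) does for "devlist".  Error
         * handling is omitted; with no patterns, the query below matches
         * the entire EDT.
         *
         *      union ccb ccb;
         *      struct dev_match_result matches[64];
         *      int fd = open("/dev/xpt0", O_RDWR);
         *
         *      bzero(&ccb, sizeof(ccb));
         *      ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
         *      ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
         *      ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
         *      ccb.ccb_h.func_code = XPT_DEV_MATCH;
         *      ccb.cdm.num_patterns = 0;
         *      ccb.cdm.pattern_buf_len = 0;
         *      ccb.cdm.match_buf_len = sizeof(matches);
         *      ccb.cdm.matches = matches;
         *      ccb.cdm.num_matches = 0;
         *      if (ioctl(fd, CAMIOCOMMAND, &ccb) == 0 &&
         *          ccb.ccb_h.status == CAM_REQ_CMP)
         *              ... walk matches[0 .. ccb.cdm.num_matches - 1] ...
         */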
 2734         case XPT_SASYNC_CB:
 2735         {
 2736                 struct ccb_setasync *csa;
 2737                 struct async_node *cur_entry;
 2738                 struct async_list *async_head;
 2739                 u_int32_t added;
 2740 
 2741                 csa = &start_ccb->csa;
 2742                 added = csa->event_enable;
 2743                 async_head = &path->device->asyncs;
 2744 
 2745                 /*
 2746                  * If there is already an entry for us, simply
 2747                  * update it.
 2748                  */
 2749                 cur_entry = SLIST_FIRST(async_head);
 2750                 while (cur_entry != NULL) {
 2751                         if ((cur_entry->callback_arg == csa->callback_arg)
 2752                          && (cur_entry->callback == csa->callback))
 2753                                 break;
 2754                         cur_entry = SLIST_NEXT(cur_entry, links);
 2755                 }
 2756 
 2757                 if (cur_entry != NULL) {
 2758                         /*
 2759                          * If the request has no flags set,
 2760                          * remove the entry.
 2761                          */
 2762                         added &= ~cur_entry->event_enable;
 2763                         if (csa->event_enable == 0) {
 2764                                 SLIST_REMOVE(async_head, cur_entry,
 2765                                              async_node, links);
 2766                                 xpt_release_device(path->device);
 2767                                 free(cur_entry, M_CAMXPT);
 2768                         } else {
 2769                                 cur_entry->event_enable = csa->event_enable;
 2770                         }
 2771                         csa->event_enable = added;
 2772                 } else {
 2773                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 2774                                            M_NOWAIT);
 2775                         if (cur_entry == NULL) {
 2776                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 2777                                 break;
 2778                         }
 2779                         cur_entry->event_enable = csa->event_enable;
 2780                         cur_entry->callback_arg = csa->callback_arg;
 2781                         cur_entry->callback = csa->callback;
 2782                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 2783                         xpt_acquire_device(path->device);
 2784                 }
 2785                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2786                 break;
 2787         }
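        /*
         * Example (sketch): kernel consumers normally register through the
         * xpt_register_async() wrapper rather than building this CCB by
         * hand.  "mydrv_async" is a hypothetical ac_callback_t; a NULL
         * path registers for events on the wildcard path.
         *
         *      status = xpt_register_async(AC_FOUND_DEVICE | AC_LOST_DEVICE,
         *          mydrv_async, softc, NULL);
         */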
 2788         case XPT_REL_SIMQ:
 2789         {
 2790                 struct ccb_relsim *crs;
 2791                 struct cam_ed *dev;
 2792 
 2793                 crs = &start_ccb->crs;
 2794                 dev = path->device;
 2795                 if (dev == NULL) {
 2796 
 2797                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 2798                         break;
 2799                 }
 2800 
 2801                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 2802 
 2803                         if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
 2804                                 /* Don't ever go below one opening */
 2805                                 if (crs->openings > 0) {
 2806                                         xpt_dev_ccbq_resize(path,
 2807                                                             crs->openings);
 2808 
 2809                                         if (bootverbose) {
 2810                                                 xpt_print(path,
 2811                                                     "tagged openings now %d\n",
 2812                                                     crs->openings);
 2813                                         }
 2814                                 }
 2815                         }
 2816                 }
 2817 
 2818                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 2819 
 2820                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 2821 
 2822                                 /*
 2823                                  * Just extend the old timeout and decrement
 2824                                  * the freeze count so that a single timeout
 2825                                  * is sufficient for releasing the queue.
 2826                                  */
 2827                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2828                                 callout_stop(&dev->callout);
 2829                         } else {
 2830 
 2831                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2832                         }
 2833 
 2834                         callout_reset(&dev->callout,
 2835                             (crs->release_timeout * hz) / 1000,
 2836                             xpt_release_devq_timeout, dev);
 2837 
 2838                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 2839 
 2840                 }
 2841 
 2842                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 2843 
 2844                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 2845                                 /*
 2846                                  * Decrement the freeze count so that a single
 2847                                  * completion is still sufficient to unfreeze
 2848                                  * the queue.
 2849                                  */
 2850                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2851                         } else {
 2852 
 2853                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 2854                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2855                         }
 2856                 }
 2857 
 2858                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 2859 
 2860                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 2861                          || (dev->ccbq.dev_active == 0)) {
 2862 
 2863                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2864                         } else {
 2865 
 2866                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 2867                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2868                         }
 2869                 }
 2870 
 2871                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 2872                         xpt_release_devq_rl(path, /*runlevel*/
 2873                             (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
 2874                                 crs->release_timeout : 0,
 2875                             /*count*/1, /*run_queue*/TRUE);
 2876                 }
 2877                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt[0];
 2878                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2879                 break;
 2880         }
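        /*
         * Example (sketch): peripheral error recovery uses this to let a
         * frozen device queue release itself after a delay.  As the hz
         * scaling above shows, release_timeout is in milliseconds.
         *
         *      struct ccb_relsim crs;
         *
         *      xpt_setup_ccb(&crs.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
         *      crs.ccb_h.func_code = XPT_REL_SIMQ;
         *      crs.release_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
         *      crs.release_timeout = 500;
         *      crs.openings = 0;
         *      crs.qfrozen_cnt = 0;
         *      xpt_action((union ccb *)&crs);
         */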
 2881         case XPT_DEBUG: {
 2882 #ifdef CAMDEBUG
 2883 #ifdef CAM_DEBUG_DELAY
 2884                 cam_debug_delay = CAM_DEBUG_DELAY;
 2885 #endif
 2886                 cam_dflags = start_ccb->cdbg.flags;
 2887                 if (cam_dpath != NULL) {
 2888                         xpt_free_path(cam_dpath);
 2889                         cam_dpath = NULL;
 2890                 }
 2891 
 2892                 if (cam_dflags != CAM_DEBUG_NONE) {
 2893                         if (xpt_create_path(&cam_dpath, xpt_periph,
 2894                                             start_ccb->ccb_h.path_id,
 2895                                             start_ccb->ccb_h.target_id,
 2896                                             start_ccb->ccb_h.target_lun) !=
 2897                                             CAM_REQ_CMP) {
 2898                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2899                                 cam_dflags = CAM_DEBUG_NONE;
 2900                         } else {
 2901                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2902                                 xpt_print(cam_dpath, "debugging flags now %x\n",
 2903                                     cam_dflags);
 2904                         }
 2905                 } else {
 2906                         cam_dpath = NULL;
 2907                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2908                 }
 2909 #else /* !CAMDEBUG */
 2910                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 2911 #endif /* CAMDEBUG */
 2912                 break;
 2913         }
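        /*
         * Example (sketch): camcontrol(8)'s "debug" subcommand drives this
         * case from userland; the kernel must be built with options
         * CAMDEBUG for it to do anything.  "xpt_fd", "bus", "target" and
         * "lun" are hypothetical.
         *
         *      bzero(&ccb, sizeof(ccb));
         *      ccb.ccb_h.func_code = XPT_DEBUG;
         *      ccb.ccb_h.path_id = bus;
         *      ccb.ccb_h.target_id = target;
         *      ccb.ccb_h.target_lun = lun;
         *      ccb.cdbg.flags = CAM_DEBUG_TRACE | CAM_DEBUG_CDB;
         *      ioctl(xpt_fd, CAMIOCOMMAND, &ccb);
         */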
 2914         case XPT_FREEZE_QUEUE:
 2915         {
 2916                 struct ccb_relsim *crs = &start_ccb->crs;
 2917 
 2918                 xpt_freeze_devq_rl(path, /*runlevel*/
 2919                     (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
 2920                     crs->release_timeout : 0, /*count*/1);
 2921                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2922                 break;
 2923         }
 2924         case XPT_NOOP:
 2925                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 2926                         xpt_freeze_devq(path, 1);
 2927                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2928                 break;
 2929         default:
 2930         case XPT_SDEV_TYPE:
 2931         case XPT_TERM_IO:
 2932         case XPT_ENG_INQ:
 2933                 /* XXX Implement */
 2934                 printf("%s: CCB type %#x not supported\n", __func__,
 2935                        start_ccb->ccb_h.func_code);
 2936                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 2937                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
 2938                         xpt_done(start_ccb);
 2939                 }
 2940                 break;
 2941         }
 2942 }
 2943 
 2944 void
 2945 xpt_polled_action(union ccb *start_ccb)
 2946 {
 2947         u_int32_t timeout;
 2948         struct    cam_sim *sim;
 2949         struct    cam_devq *devq;
 2950         struct    cam_ed *dev;
 2951 
 2952 
 2953         timeout = start_ccb->ccb_h.timeout * 10;
 2954         sim = start_ccb->ccb_h.path->bus->sim;
 2955         devq = sim->devq;
 2956         dev = start_ccb->ccb_h.path->device;
 2957 
 2958         mtx_assert(sim->mtx, MA_OWNED);
 2959 
 2960         /*
 2961          * Steal an opening so that no other queued requests
 2962          * can get it before us while we simulate interrupts.
 2963          */
 2964         dev->ccbq.devq_openings--;
 2965         dev->ccbq.dev_openings--;
 2966 
 2967         while(((devq != NULL && devq->send_openings <= 0) ||
 2968            dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
 2969                 DELAY(100);
 2970                 (*(sim->sim_poll))(sim);
 2971                 camisr_runqueue(&sim->sim_doneq);
 2972         }
 2973 
 2974         dev->ccbq.devq_openings++;
 2975         dev->ccbq.dev_openings++;
 2976 
 2977         if (timeout != 0) {
 2978                 xpt_action(start_ccb);
 2979                 while(--timeout > 0) {
 2980                         (*(sim->sim_poll))(sim);
 2981                         camisr_runqueue(&sim->sim_doneq);
 2982                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 2983                             != CAM_REQ_INPROG)
 2984                                 break;
 2985                         DELAY(100);
 2986                 }
 2987                 if (timeout == 0) {
 2988                         /*
 2989                          * XXX Is it worth adding a sim_timeout entry
 2990                          * point so we can attempt recovery?  If
 2991                          * this is only used for dumps, I don't think
 2992                          * it is.
 2993                          */
 2994                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 2995                 }
 2996         } else {
 2997                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 2998         }
 2999 }
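
/*
 * Example (minimal sketch): polled I/O as a dump routine might issue it,
 * with the SIM lock held and interrupts unusable.  "periph" is a
 * hypothetical peripheral instance.  Note that ccb_h.timeout is in
 * milliseconds; the loops above poll every 100us, hence the "* 10".
 *
 *      struct ccb_scsiio csio;
 *
 *      xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 *      scsi_synchronize_cache(&csio, 1, NULL, MSG_SIMPLE_Q_TAG,
 *          0, 0, SSD_FULL_SIZE, 5000);
 *      xpt_polled_action((union ccb *)&csio);
 *      if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *              xpt_print(periph->path, "cache synchronize failed\n");
 */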
 3000 
 3001 /*
3002  * Schedule a peripheral driver to receive a ccb when its
 3003  * target device has space for more transactions.
 3004  */
 3005 void
 3006 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 3007 {
 3008         struct cam_ed *device;
 3009         int runq = 0;
 3010 
 3011         mtx_assert(perph->sim->mtx, MA_OWNED);
 3012 
 3013         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3014         device = perph->path->device;
 3015         if (periph_is_queued(perph)) {
 3016                 /* Simply reorder based on new priority */
 3017                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3018                           ("   change priority to %d\n", new_priority));
 3019                 if (new_priority < perph->pinfo.priority) {
 3020                         camq_change_priority(&device->drvq,
 3021                                              perph->pinfo.index,
 3022                                              new_priority);
 3023                         runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3024                 }
 3025         } else {
 3026                 /* New entry on the queue */
 3027                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3028                           ("   added periph to queue\n"));
 3029                 perph->pinfo.priority = new_priority;
 3030                 perph->pinfo.generation = ++device->drvq.generation;
 3031                 camq_insert(&device->drvq, &perph->pinfo);
 3032                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3033         }
 3034         if (runq != 0) {
 3035                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3036                           ("   calling xpt_run_dev_allocq\n"));
 3037                 xpt_run_dev_allocq(perph->path->bus);
 3038         }
 3039 }
 3040 
 3041 
 3042 /*
 3043  * Schedule a device to run on a given queue.
 3044  * If the device was inserted as a new entry on the queue,
 3045  * return 1 meaning the device queue should be run. If we
 3046  * were already queued, implying someone else has already
 3047  * started the queue, return 0 so the caller doesn't attempt
 3048  * to run the queue.
 3049  */
 3050 int
 3051 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3052                  u_int32_t new_priority)
 3053 {
 3054         int retval;
 3055         u_int32_t old_priority;
 3056 
 3057         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3058 
 3059         old_priority = pinfo->priority;
 3060 
 3061         /*
 3062          * Are we already queued?
 3063          */
 3064         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3065                 /* Simply reorder based on new priority */
 3066                 if (new_priority < old_priority) {
 3067                         camq_change_priority(queue, pinfo->index,
 3068                                              new_priority);
 3069                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3070                                         ("changed priority to %d\n",
 3071                                          new_priority));
 3072                         retval = 1;
 3073                 } else
 3074                         retval = 0;
 3075         } else {
 3076                 /* New entry on the queue */
 3077                 if (new_priority < old_priority)
 3078                         pinfo->priority = new_priority;
 3079 
 3080                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3081                                 ("Inserting onto queue\n"));
 3082                 pinfo->generation = ++queue->generation;
 3083                 camq_insert(queue, pinfo);
 3084                 retval = 1;
 3085         }
 3086         return (retval);
 3087 }
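
/*
 * Typical caller pattern (sketch, cf. xpt_schedule() above): the
 * xpt_schedule_dev_allocq()/xpt_schedule_dev_sendq() wrappers feed this
 * function, and the queue is run only when a new entry was inserted:
 *
 *      if (xpt_schedule_dev_allocq(bus, device) != 0)
 *              xpt_run_dev_allocq(bus);
 */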
 3088 
 3089 static void
 3090 xpt_run_dev_allocq(struct cam_eb *bus)
 3091 {
 3092         struct  cam_devq *devq;
 3093 
 3094         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3095         devq = bus->sim->devq;
 3096 
 3097         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3098                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3099                          "openings == %d, active == %d\n",
 3100                          devq->alloc_queue.qfrozen_cnt[0],
 3101                          devq->alloc_queue.entries,
 3102                          devq->alloc_openings,
 3103                          devq->alloc_active));
 3104 
 3105         devq->alloc_queue.qfrozen_cnt[0]++;
 3106         while ((devq->alloc_queue.entries > 0)
 3107             && (devq->alloc_openings > 0)
 3108             && (devq->alloc_queue.qfrozen_cnt[0] <= 1)) {
 3109                 struct  cam_ed_qinfo *qinfo;
 3110                 struct  cam_ed *device;
 3111                 union   ccb *work_ccb;
 3112                 struct  cam_periph *drv;
 3113                 struct  camq *drvq;
 3114 
 3115                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3116                                                            CAMQ_HEAD);
 3117                 device = qinfo->device;
 3118                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3119                                 ("running device %p\n", device));
 3120 
 3121                 drvq = &device->drvq;
 3122 
 3123 #ifdef CAMDEBUG
 3124                 if (drvq->entries <= 0) {
 3125                         panic("xpt_run_dev_allocq: "
 3126                               "Device on queue without any work to do");
 3127                 }
 3128 #endif
 3129                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3130                         devq->alloc_openings--;
 3131                         devq->alloc_active++;
 3132                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3133                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3134                                       drv->pinfo.priority);
 3135                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3136                                         ("calling periph start\n"));
 3137                         drv->periph_start(drv, work_ccb);
 3138                 } else {
 3139                         /*
 3140                          * Malloc failure in alloc_ccb
 3141                          */
 3142                         /*
 3143                          * XXX add us to a list to be run from free_ccb
 3144                          * if we don't have any ccbs active on this
3145                  * device queue; otherwise we may never get run
 3146                          * again.
 3147                          */
 3148                         break;
 3149                 }
 3150 
 3151                 /* We may have more work. Attempt to reschedule. */
 3152                 xpt_schedule_dev_allocq(bus, device);
 3153         }
 3154         devq->alloc_queue.qfrozen_cnt[0]--;
 3155 }
 3156 
 3157 static void
 3158 xpt_run_dev_sendq(struct cam_eb *bus)
 3159 {
 3160         struct  cam_devq *devq;
 3161 
 3162         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3163 
 3164         devq = bus->sim->devq;
 3165 
 3166         devq->send_queue.qfrozen_cnt[0]++;
 3167         while ((devq->send_queue.entries > 0)
 3168             && (devq->send_openings > 0)
 3169             && (devq->send_queue.qfrozen_cnt[0] <= 1)) {
 3170                 struct  cam_ed_qinfo *qinfo;
 3171                 struct  cam_ed *device;
 3172                 union ccb *work_ccb;
 3173                 struct  cam_sim *sim;
 3174 
 3175                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3176                                                            CAMQ_HEAD);
 3177                 device = qinfo->device;
 3178                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3179                                 ("running device %p\n", device));
 3180 
 3181                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3182                 if (work_ccb == NULL) {
 3183                         printf("device on run queue with no ccbs???\n");
 3184                         continue;
 3185                 }
 3186 
 3187                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3188 
 3189                         mtx_lock(&xsoftc.xpt_lock);
 3190                         if (xsoftc.num_highpower <= 0) {
 3191                                 /*
 3192                                  * We got a high power command, but we
 3193                                  * don't have any available slots.  Freeze
 3194                                  * the device queue until we have a slot
 3195                                  * available.
 3196                                  */
 3197                                 xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3198                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
 3199                                                    &work_ccb->ccb_h,
 3200                                                    xpt_links.stqe);
 3201 
 3202                                 mtx_unlock(&xsoftc.xpt_lock);
 3203                                 continue;
 3204                         } else {
 3205                                 /*
 3206                                  * Consume a high power slot while
 3207                                  * this ccb runs.
 3208                                  */
 3209                                 xsoftc.num_highpower--;
 3210                         }
 3211                         mtx_unlock(&xsoftc.xpt_lock);
 3212                 }
 3213                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3214                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3215 
 3216                 devq->send_openings--;
 3217                 devq->send_active++;
 3218 
 3219                 xpt_schedule_dev_sendq(bus, device);
 3220 
 3221                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
 3222                         /*
 3223                          * The client wants to freeze the queue
 3224                          * after this CCB is sent.
 3225                          */
 3226                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3227                 }
 3228 
 3229                 /* In Target mode, the peripheral driver knows best... */
 3230                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3231                         if ((device->inq_flags & SID_CmdQue) != 0
 3232                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3233                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3234                         else
 3235                                 /*
 3236                                  * Clear this in case of a retried CCB that
 3237                                  * failed due to a rejected tag.
 3238                                  */
 3239                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3240                 }
 3241 
 3242                 /*
 3243                  * Device queues can be shared among multiple sim instances
 3244                  * that reside on different busses.  Use the SIM in the queue
 3245                  * CCB's path, rather than the one in the bus that was passed
 3246                  * into this function.
 3247                  */
 3248                 sim = work_ccb->ccb_h.path->bus->sim;
 3249                 (*(sim->sim_action))(sim, work_ccb);
 3250         }
 3251         devq->send_queue.qfrozen_cnt[0]--;
 3252 }
 3253 
 3254 /*
3255  * This function merges the request fields and payload of the slave ccb into
3256  * the master ccb, while keeping important fields in the master ccb constant.
 3257  */
 3258 void
 3259 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3260 {
 3261 
 3262         /*
 3263          * Pull fields that are valid for peripheral drivers to set
 3264          * into the master CCB along with the CCB "payload".
 3265          */
 3266         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3267         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3268         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3269         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 3270         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3271               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3272 }
 3273 
 3274 void
 3275 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3276 {
 3277 
 3278         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3279         ccb_h->pinfo.priority = priority;
 3280         ccb_h->path = path;
 3281         ccb_h->path_id = path->bus->path_id;
 3282         if (path->target)
 3283                 ccb_h->target_id = path->target->target_id;
 3284         else
 3285                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3286         if (path->device) {
 3287                 ccb_h->target_lun = path->device->lun_id;
 3288                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3289         } else {
 3290                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3291         }
 3292         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3293         ccb_h->flags = 0;
 3294 }
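
/*
 * Example (sketch): xpt_setup_ccb() stamps the header of any CCB before it
 * is passed to xpt_action(), as in the XPT_PATH_INQ query issued by
 * xpt_bus_register() below:
 *
 *      struct ccb_pathinq cpi;
 *
 *      xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 *      cpi.ccb_h.func_code = XPT_PATH_INQ;
 *      xpt_action((union ccb *)&cpi);
 *      if (cpi.ccb_h.status == CAM_REQ_CMP)
 *              printf("hba inquiry flags %#x\n", cpi.hba_inquiry);
 */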
 3295 
 3296 /* Path manipulation functions */
 3297 cam_status
 3298 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3299                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3300 {
 3301         struct     cam_path *path;
 3302         cam_status status;
 3303 
 3304         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 3305 
 3306         if (path == NULL) {
 3307                 status = CAM_RESRC_UNAVAIL;
 3308                 return(status);
 3309         }
 3310         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3311         if (status != CAM_REQ_CMP) {
 3312                 free(path, M_CAMXPT);
 3313                 path = NULL;
 3314         }
 3315         *new_path_ptr = path;
 3316         return (status);
 3317 }
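
/*
 * Example (sketch): a SIM driver announcing a lost device.  cam_sim_path()
 * supplies the bus's path_id, and the temporary path is freed once the
 * async has been delivered.  "sim" and "target_id" are hypothetical.
 *
 *      struct cam_path *path;
 *
 *      if (xpt_create_path(&path, NULL, cam_sim_path(sim),
 *          target_id, CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
 *              xpt_async(AC_LOST_DEVICE, path, NULL);
 *              xpt_free_path(path);
 *      }
 */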
 3318 
 3319 cam_status
 3320 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
 3321                          struct cam_periph *periph, path_id_t path_id,
 3322                          target_id_t target_id, lun_id_t lun_id)
 3323 {
 3324         struct     cam_path *path;
 3325         struct     cam_eb *bus = NULL;
 3326         cam_status status;
 3327         int        need_unlock = 0;
 3328 
 3329         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK);
 3330 
 3331         if (path_id != CAM_BUS_WILDCARD) {
 3332                 bus = xpt_find_bus(path_id);
 3333                 if (bus != NULL) {
 3334                         need_unlock = 1;
 3335                         CAM_SIM_LOCK(bus->sim);
 3336                 }
 3337         }
 3338         status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
 3339         if (need_unlock) {
 3340                 CAM_SIM_UNLOCK(bus->sim);
 3341                 xpt_release_bus(bus);
 3342         }
 3343         if (status != CAM_REQ_CMP) {
 3344                 free(path, M_CAMXPT);
 3345                 path = NULL;
 3346         }
 3347         *new_path_ptr = path;
 3348         return (status);
 3349 }
 3350 
 3351 cam_status
 3352 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 3353                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3354 {
 3355         struct       cam_eb *bus;
 3356         struct       cam_et *target;
 3357         struct       cam_ed *device;
 3358         cam_status   status;
 3359 
 3360         status = CAM_REQ_CMP;   /* Completed without error */
 3361         target = NULL;          /* Wildcarded */
 3362         device = NULL;          /* Wildcarded */
 3363 
 3364         /*
 3365          * We will potentially modify the EDT, so block interrupts
 3366          * that may attempt to create cam paths.
 3367          */
 3368         bus = xpt_find_bus(path_id);
 3369         if (bus == NULL) {
 3370                 status = CAM_PATH_INVALID;
 3371         } else {
 3372                 target = xpt_find_target(bus, target_id);
 3373                 if (target == NULL) {
 3374                         /* Create one */
 3375                         struct cam_et *new_target;
 3376 
 3377                         new_target = xpt_alloc_target(bus, target_id);
 3378                         if (new_target == NULL) {
 3379                                 status = CAM_RESRC_UNAVAIL;
 3380                         } else {
 3381                                 target = new_target;
 3382                         }
 3383                 }
 3384                 if (target != NULL) {
 3385                         device = xpt_find_device(target, lun_id);
 3386                         if (device == NULL) {
 3387                                 /* Create one */
 3388                                 struct cam_ed *new_device;
 3389 
 3390                                 new_device =
 3391                                     (*(bus->xport->alloc_device))(bus,
 3392                                                                       target,
 3393                                                                       lun_id);
 3394                                 if (new_device == NULL) {
 3395                                         status = CAM_RESRC_UNAVAIL;
 3396                                 } else {
 3397                                         device = new_device;
 3398                                 }
 3399                         }
 3400                 }
 3401         }
 3402 
 3403         /*
 3404          * Only touch the user's data if we are successful.
 3405          */
 3406         if (status == CAM_REQ_CMP) {
 3407                 new_path->periph = perph;
 3408                 new_path->bus = bus;
 3409                 new_path->target = target;
 3410                 new_path->device = device;
 3411                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 3412         } else {
 3413                 if (device != NULL)
 3414                         xpt_release_device(device);
 3415                 if (target != NULL)
 3416                         xpt_release_target(target);
 3417                 if (bus != NULL)
 3418                         xpt_release_bus(bus);
 3419         }
 3420         return (status);
 3421 }
 3422 
 3423 void
 3424 xpt_release_path(struct cam_path *path)
 3425 {
 3426         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 3427         if (path->device != NULL) {
 3428                 xpt_release_device(path->device);
 3429                 path->device = NULL;
 3430         }
 3431         if (path->target != NULL) {
 3432                 xpt_release_target(path->target);
 3433                 path->target = NULL;
 3434         }
 3435         if (path->bus != NULL) {
 3436                 xpt_release_bus(path->bus);
 3437                 path->bus = NULL;
 3438         }
 3439 }
 3440 
 3441 void
 3442 xpt_free_path(struct cam_path *path)
 3443 {
 3444 
 3445         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 3446         xpt_release_path(path);
 3447         free(path, M_CAMXPT);
 3448 }
 3449 
 3450 void
 3451 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
 3452     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
 3453 {
 3454 
 3455         mtx_lock(&xsoftc.xpt_topo_lock);
 3456         if (bus_ref) {
 3457                 if (path->bus)
 3458                         *bus_ref = path->bus->refcount;
 3459                 else
 3460                         *bus_ref = 0;
 3461         }
 3462         mtx_unlock(&xsoftc.xpt_topo_lock);
 3463         if (periph_ref) {
 3464                 if (path->periph)
 3465                         *periph_ref = path->periph->refcount;
 3466                 else
 3467                         *periph_ref = 0;
 3468         }
 3469         if (target_ref) {
 3470                 if (path->target)
 3471                         *target_ref = path->target->refcount;
 3472                 else
 3473                         *target_ref = 0;
 3474         }
 3475         if (device_ref) {
 3476                 if (path->device)
 3477                         *device_ref = path->device->refcount;
 3478                 else
 3479                         *device_ref = 0;
 3480         }
 3481 }
 3482 
 3483 /*
 3484  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 3485  * in path1, 2 for match with wildcards in path2.
 3486  */
 3487 int
 3488 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 3489 {
 3490         int retval = 0;
 3491 
 3492         if (path1->bus != path2->bus) {
 3493                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 3494                         retval = 1;
 3495                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 3496                         retval = 2;
 3497                 else
 3498                         return (-1);
 3499         }
 3500         if (path1->target != path2->target) {
 3501                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 3502                         if (retval == 0)
 3503                                 retval = 1;
 3504                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 3505                         retval = 2;
 3506                 else
 3507                         return (-1);
 3508         }
 3509         if (path1->device != path2->device) {
 3510                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 3511                         if (retval == 0)
 3512                                 retval = 1;
 3513                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 3514                         retval = 2;
 3515                 else
 3516                         return (-1);
 3517         }
 3518         return (retval);
 3519 }
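
/*
 * Usage note (sketch): callers often only care whether two paths overlap
 * at all, so any non-negative return is treated as a match:
 *
 *      if (xpt_path_comp(periph->path, async_path) >= 0)
 *              ... the event applies to this peripheral's device ...
 */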
 3520 
 3521 void
 3522 xpt_print_path(struct cam_path *path)
 3523 {
 3524 
 3525         if (path == NULL)
 3526                 printf("(nopath): ");
 3527         else {
 3528                 if (path->periph != NULL)
 3529                         printf("(%s%d:", path->periph->periph_name,
 3530                                path->periph->unit_number);
 3531                 else
 3532                         printf("(noperiph:");
 3533 
 3534                 if (path->bus != NULL)
 3535                         printf("%s%d:%d:", path->bus->sim->sim_name,
 3536                                path->bus->sim->unit_number,
 3537                                path->bus->sim->bus_id);
 3538                 else
 3539                         printf("nobus:");
 3540 
 3541                 if (path->target != NULL)
 3542                         printf("%d:", path->target->target_id);
 3543                 else
 3544                         printf("X:");
 3545 
 3546                 if (path->device != NULL)
 3547                         printf("%d): ", path->device->lun_id);
 3548                 else
 3549                         printf("X): ");
 3550         }
 3551 }
 3552 
 3553 void
 3554 xpt_print(struct cam_path *path, const char *fmt, ...)
 3555 {
 3556         va_list ap;
 3557         xpt_print_path(path);
 3558         va_start(ap, fmt);
 3559         vprintf(fmt, ap);
 3560         va_end(ap);
 3561 }
 3562 
 3563 int
 3564 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 3565 {
 3566         struct sbuf sb;
 3567 
 3568 #ifdef INVARIANTS
 3569         if (path != NULL && path->bus != NULL)
 3570                 mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3571 #endif
 3572 
 3573         sbuf_new(&sb, str, str_len, 0);
 3574 
 3575         if (path == NULL)
 3576                 sbuf_printf(&sb, "(nopath): ");
 3577         else {
 3578                 if (path->periph != NULL)
 3579                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 3580                                     path->periph->unit_number);
 3581                 else
 3582                         sbuf_printf(&sb, "(noperiph:");
 3583 
 3584                 if (path->bus != NULL)
 3585                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 3586                                     path->bus->sim->unit_number,
 3587                                     path->bus->sim->bus_id);
 3588                 else
 3589                         sbuf_printf(&sb, "nobus:");
 3590 
 3591                 if (path->target != NULL)
 3592                         sbuf_printf(&sb, "%d:", path->target->target_id);
 3593                 else
 3594                         sbuf_printf(&sb, "X:");
 3595 
 3596                 if (path->device != NULL)
 3597                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
 3598                 else
 3599                         sbuf_printf(&sb, "X): ");
 3600         }
 3601         sbuf_finish(&sb);
 3602 
 3603         return(sbuf_len(&sb));
 3604 }
 3605 
 3606 path_id_t
 3607 xpt_path_path_id(struct cam_path *path)
 3608 {
 3609         return(path->bus->path_id);
 3610 }
 3611 
 3612 target_id_t
 3613 xpt_path_target_id(struct cam_path *path)
 3614 {
 3615         if (path->target != NULL)
 3616                 return (path->target->target_id);
 3617         else
 3618                 return (CAM_TARGET_WILDCARD);
 3619 }
 3620 
 3621 lun_id_t
 3622 xpt_path_lun_id(struct cam_path *path)
 3623 {
 3624         if (path->device != NULL)
 3625                 return (path->device->lun_id);
 3626         else
 3627                 return (CAM_LUN_WILDCARD);
 3628 }
 3629 
 3630 struct cam_sim *
 3631 xpt_path_sim(struct cam_path *path)
 3632 {
 3633 
 3634         return (path->bus->sim);
 3635 }
 3636 
 3637 struct cam_periph*
 3638 xpt_path_periph(struct cam_path *path)
 3639 {
 3640         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3641 
 3642         return (path->periph);
 3643 }
 3644 
 3645 int
 3646 xpt_path_legacy_ata_id(struct cam_path *path)
 3647 {
 3648         struct cam_eb *bus;
 3649         int bus_id;
 3650 
 3651         if ((strcmp(path->bus->sim->sim_name, "ata") != 0) &&
 3652             strcmp(path->bus->sim->sim_name, "ahcich") != 0 &&
 3653             strcmp(path->bus->sim->sim_name, "mvsch") != 0 &&
 3654             strcmp(path->bus->sim->sim_name, "siisch") != 0)
 3655                 return (-1);
 3656 
 3657         if (strcmp(path->bus->sim->sim_name, "ata") == 0 &&
 3658             path->bus->sim->unit_number < 2) {
 3659                 bus_id = path->bus->sim->unit_number;
 3660         } else {
 3661                 bus_id = 2;
 3662                 xpt_lock_buses();
 3663                 TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
 3664                         if (bus == path->bus)
 3665                                 break;
 3666                         if ((strcmp(bus->sim->sim_name, "ata") == 0 &&
 3667                              bus->sim->unit_number >= 2) ||
 3668                             strcmp(bus->sim->sim_name, "ahcich") == 0 ||
 3669                             strcmp(bus->sim->sim_name, "mvsch") == 0 ||
 3670                             strcmp(bus->sim->sim_name, "siisch") == 0)
 3671                                 bus_id++;
 3672                 }
 3673                 xpt_unlock_buses();
 3674         }
 3675         if (path->target != NULL) {
 3676                 if (path->target->target_id < 2)
 3677                         return (bus_id * 2 + path->target->target_id);
 3678                 else
 3679                         return (-1);
 3680         } else
 3681                 return (bus_id * 2);
 3682 }
 3683 
 3684 /*
 3685  * Release a CAM control block for the caller.  Remit the cost of the structure
3686  * to the device referenced by the path.  If this device had no 'credits'
3687  * and peripheral drivers have registered async callbacks for this notification,
 3688  * call them now.
 3689  */
 3690 void
 3691 xpt_release_ccb(union ccb *free_ccb)
 3692 {
 3693         struct   cam_path *path;
 3694         struct   cam_ed *device;
 3695         struct   cam_eb *bus;
 3696         struct   cam_sim *sim;
 3697 
 3698         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 3699         path = free_ccb->ccb_h.path;
 3700         device = path->device;
 3701         bus = path->bus;
 3702         sim = bus->sim;
 3703 
 3704         mtx_assert(sim->mtx, MA_OWNED);
 3705 
 3706         cam_ccbq_release_opening(&device->ccbq);
 3707         if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) {
 3708                 device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
 3709                 cam_ccbq_resize(&device->ccbq,
 3710                     device->ccbq.dev_openings + device->ccbq.dev_active);
 3711         }
 3712         if (sim->ccb_count > sim->max_ccbs) {
 3713                 xpt_free_ccb(free_ccb);
 3714                 sim->ccb_count--;
 3715         } else {
 3716                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
 3717                     xpt_links.sle);
 3718         }
 3719         if (sim->devq == NULL) {
 3720                 return;
 3721         }
 3722         sim->devq->alloc_openings++;
 3723         sim->devq->alloc_active--;
 3724         if (device_is_alloc_queued(device) == 0)
 3725                 xpt_schedule_dev_allocq(bus, device);
 3726         xpt_run_dev_allocq(bus);
 3727 }
 3728 
 3729 /* Functions accessed by SIM drivers */
 3730 
 3731 static struct xpt_xport xport_default = {
 3732         .alloc_device = xpt_alloc_device_default,
 3733         .action = xpt_action_default,
 3734         .async = xpt_dev_async_default,
 3735 };
 3736 
 3737 /*
 3738  * A sim structure, listing the SIM entry points and instance
3739  * identification info, is passed to xpt_bus_register to hook the SIM
3740  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3741  * for this new bus, places it in the list of busses, and assigns
 3742  * it a path_id.  The path_id may be influenced by "hard wiring"
 3743  * information specified by the user.  Once interrupt services are
 3744  * available, the bus will be probed.
 3745  */
 3746 int32_t
 3747 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
 3748 {
 3749         struct cam_eb *new_bus;
 3750         struct cam_eb *old_bus;
 3751         struct ccb_pathinq cpi;
 3752         struct cam_path *path;
 3753         cam_status status;
 3754 
 3755         mtx_assert(sim->mtx, MA_OWNED);
 3756 
 3757         sim->bus_id = bus;
 3758         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 3759                                           M_CAMXPT, M_NOWAIT);
 3760         if (new_bus == NULL) {
 3761                 /* Couldn't satisfy request */
 3762                 return (CAM_RESRC_UNAVAIL);
 3763         }
 3764         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 3765         if (path == NULL) {
 3766                 free(new_bus, M_CAMXPT);
 3767                 return (CAM_RESRC_UNAVAIL);
 3768         }
 3769 
 3770         if (strcmp(sim->sim_name, "xpt") != 0) {
 3771                 sim->path_id =
 3772                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 3773         }
 3774 
 3775         TAILQ_INIT(&new_bus->et_entries);
 3776         new_bus->path_id = sim->path_id;
 3777         cam_sim_hold(sim);
 3778         new_bus->sim = sim;
 3779         timevalclear(&new_bus->last_reset);
 3780         new_bus->flags = 0;
 3781         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 3782         new_bus->generation = 0;
 3783 
 3784         mtx_lock(&xsoftc.xpt_topo_lock);
 3785         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3786         while (old_bus != NULL
 3787             && old_bus->path_id < new_bus->path_id)
 3788                 old_bus = TAILQ_NEXT(old_bus, links);
 3789         if (old_bus != NULL)
 3790                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 3791         else
 3792                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
 3793         xsoftc.bus_generation++;
 3794         mtx_unlock(&xsoftc.xpt_topo_lock);
 3795 
 3796         /*
 3797          * Set a default transport so that a PATH_INQ can be issued to
 3798          * the SIM.  This will then allow for probing and attaching of
 3799          * a more appropriate transport.
 3800          */
 3801         new_bus->xport = &xport_default;
 3802 
 3803         status = xpt_compile_path(path, /*periph*/NULL, sim->path_id,
 3804                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3805         if (status != CAM_REQ_CMP)
 3806                 printf("xpt_compile_path returned %d\n", status);
 3807 
 3808         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 3809         cpi.ccb_h.func_code = XPT_PATH_INQ;
 3810         xpt_action((union ccb *)&cpi);
 3811 
 3812         if (cpi.ccb_h.status == CAM_REQ_CMP) {
 3813                 switch (cpi.transport) {
 3814                 case XPORT_SPI:
 3815                 case XPORT_SAS:
 3816                 case XPORT_FC:
 3817                 case XPORT_USB:
 3818                 case XPORT_ISCSI:
 3819                 case XPORT_PPB:
 3820                         new_bus->xport = scsi_get_xport();
 3821                         break;
 3822                 case XPORT_ATA:
 3823                 case XPORT_SATA:
 3824                         new_bus->xport = ata_get_xport();
 3825                         break;
 3826                 default:
 3827                         new_bus->xport = &xport_default;
 3828                         break;
 3829                 }
 3830         }
 3831 
 3832         /* Notify interested parties */
 3833         if (sim->path_id != CAM_XPT_PATH_ID) {
 3834                 union   ccb *scan_ccb;
 3835 
 3836                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
 3837                 /* Initiate bus rescan. */
 3838                 scan_ccb = xpt_alloc_ccb_nowait();
 3839                 scan_ccb->ccb_h.path = path;
 3840                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
 3841                 scan_ccb->crcn.flags = 0;
 3842                 xpt_rescan(scan_ccb);
 3843         } else
 3844                 xpt_free_path(path);
 3845         return (CAM_SUCCESS);
 3846 }
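
/*
 * Example (minimal sketch): the registration sequence as seen from a SIM
 * driver's attach routine.  "mydrv_action", "mydrv_poll", "softc" and
 * MYDRV_MAX_TRANS are hypothetical; error unwinding is abbreviated.
 *
 *      struct cam_devq *devq;
 *      struct cam_sim *sim;
 *
 *      if ((devq = cam_simq_alloc(MYDRV_MAX_TRANS)) == NULL)
 *              return (ENOMEM);
 *      sim = cam_sim_alloc(mydrv_action, mydrv_poll, "mydrv", softc,
 *          device_get_unit(dev), &softc->mtx, 1, MYDRV_MAX_TRANS, devq);
 *      if (sim == NULL) {
 *              cam_simq_free(devq);
 *              return (ENOMEM);
 *      }
 *      mtx_lock(&softc->mtx);
 *      if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) {
 *              cam_sim_free(sim, TRUE);
 *              mtx_unlock(&softc->mtx);
 *              return (ENXIO);
 *      }
 *      mtx_unlock(&softc->mtx);
 */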
 3847 
 3848 int32_t
 3849 xpt_bus_deregister(path_id_t pathid)
 3850 {
 3851         struct cam_path bus_path;
 3852         cam_status status;
 3853 
 3854         status = xpt_compile_path(&bus_path, NULL, pathid,
 3855                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3856         if (status != CAM_REQ_CMP)
 3857                 return (status);
 3858 
 3859         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 3860         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 3861 
 3862         /* Release the reference count held while registered. */
 3863         xpt_release_bus(bus_path.bus);
 3864         xpt_release_path(&bus_path);
 3865 
 3866         return (CAM_REQ_CMP);
 3867 }
 3868 
 3869 static path_id_t
 3870 xptnextfreepathid(void)
 3871 {
 3872         struct cam_eb *bus;
 3873         path_id_t pathid;
 3874         const char *strval;
 3875 
 3876         pathid = 0;
 3877         mtx_lock(&xsoftc.xpt_topo_lock);
 3878         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3879 retry:
 3880         /* Find an unoccupied pathid */
 3881         while (bus != NULL && bus->path_id <= pathid) {
 3882                 if (bus->path_id == pathid)
 3883                         pathid++;
 3884                 bus = TAILQ_NEXT(bus, links);
 3885         }
 3886         mtx_unlock(&xsoftc.xpt_topo_lock);
 3887 
 3888         /*
 3889          * Ensure that this pathid is not reserved for
 3890          * a bus that may be registered in the future.
 3891          */
 3892         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 3893                 ++pathid;
 3894                 /* Start the search over */
 3895                 mtx_lock(&xsoftc.xpt_topo_lock);
 3896                 goto retry;
 3897         }
 3898         return (pathid);
 3899 }
 3900 
 3901 static path_id_t
 3902 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 3903 {
 3904         path_id_t pathid;
 3905         int i, dunit, val;
 3906         char buf[32];
 3907         const char *dname;
 3908 
 3909         pathid = CAM_XPT_PATH_ID;
 3910         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 3911         i = 0;
 3912         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 3913                 if (strcmp(dname, "scbus")) {
 3914                         /* Avoid a bit of foot shooting. */
 3915                         continue;
 3916                 }
 3917                 if (dunit < 0)          /* unwired?! */
 3918                         continue;
 3919                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 3920                         if (sim_bus == val) {
 3921                                 pathid = dunit;
 3922                                 break;
 3923                         }
 3924                 } else if (sim_bus == 0) {
 3925                         /* Unspecified matches bus 0 */
 3926                         pathid = dunit;
 3927                         break;
 3928                 } else {
 3929                         printf("Ambiguous scbus configuration for %s%d "
 3930                                "bus %d, cannot wire down.  The kernel "
 3931                                "config entry for scbus%d should "
 3932                                "specify a controller bus.\n"
 3933                                "Scbus will be assigned dynamically.\n",
 3934                                sim_name, sim_unit, sim_bus, dunit);
 3935                         break;
 3936                 }
 3937         }
 3938 
 3939         if (pathid == CAM_XPT_PATH_ID)
 3940                 pathid = xptnextfreepathid();
 3941         return (pathid);
 3942 }
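
      /*
       * Example of the wiring consumed above (annotation): a kernel
       * config line such as
       *
       *      device scbus0 at ahc0 bus 0
       *
       * or the equivalent /boot/device.hints entries
       *
       *      hint.scbus.0.at="ahc0"
       *      hint.scbus.0.bus="0"
       *
       * pin bus 0 of controller ahc0 to path id 0.  Unwired SIMs fall
       * through to xptnextfreepathid().
       */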
 3943 
 3944 void
 3945 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 3946 {
 3947         struct cam_eb *bus;
 3948         struct cam_et *target, *next_target;
 3949         struct cam_ed *device, *next_device;
 3950 
 3951         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 3952 
 3953         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
 3954 
 3955         /*
 3956          * Most async events come from a CAM interrupt context.  In
 3957          * a few cases, the error recovery code at the peripheral layer,
 3958          * which may run from our SWI or a process context, may signal
 3959          * deferred events with a call to xpt_async.
 3960          */
 3961 
 3962         bus = path->bus;
 3963 
 3964         if (async_code == AC_BUS_RESET) {
 3965                 /* Update our notion of when the last reset occurred */
 3966                 microtime(&bus->last_reset);
 3967         }
 3968 
 3969         for (target = TAILQ_FIRST(&bus->et_entries);
 3970              target != NULL;
 3971              target = next_target) {
 3972 
 3973                 next_target = TAILQ_NEXT(target, links);
 3974 
 3975                 if (path->target != target
 3976                  && path->target->target_id != CAM_TARGET_WILDCARD
 3977                  && target->target_id != CAM_TARGET_WILDCARD)
 3978                         continue;
 3979 
 3980                 if (async_code == AC_SENT_BDR) {
 3981                         /* Update our notion of when the last reset occurred */
 3982                         microtime(&path->target->last_reset);
 3983                 }
 3984 
 3985                 for (device = TAILQ_FIRST(&target->ed_entries);
 3986                      device != NULL;
 3987                      device = next_device) {
 3988 
 3989                         next_device = TAILQ_NEXT(device, links);
 3990 
 3991                         if (path->device != device
 3992                          && path->device->lun_id != CAM_LUN_WILDCARD
 3993                          && device->lun_id != CAM_LUN_WILDCARD)
 3994                                 continue;
 3995                         /*
 3996                          * The async callback could free the device.
 3997                          * If it is a broadcast async, it doesn't hold
 3998                          * a device reference, so take our own reference.
 3999                          */
 4000                         xpt_acquire_device(device);
 4001                         (*(bus->xport->async))(async_code, bus,
 4002                                                target, device,
 4003                                                async_arg);
 4004 
 4005                         xpt_async_bcast(&device->asyncs, async_code,
 4006                                         path, async_arg);
 4007                         xpt_release_device(device);
 4008                 }
 4009         }
 4010 
 4011         /*
 4012          * If this wasn't a fully wildcarded async, tell all
 4013          * clients that want all async events.
 4014          */
 4015         if (bus != xpt_periph->path->bus)
 4016                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 4017                                 path, async_arg);
 4018 }
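
      /*
       * Worked example (annotation): an AC_SENT_BDR posted on path 0:1:0
       * is delivered to 0:1:0 itself and to any wildcard entries such as
       * 0:CAM_TARGET_WILDCARD:CAM_LUN_WILDCARD, while 0:2:0 is skipped by
       * the continue tests above; only matching or wildcarded targets and
       * luns see the event.
       */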
 4019 
 4020 static void
 4021 xpt_async_bcast(struct async_list *async_head,
 4022                 u_int32_t async_code,
 4023                 struct cam_path *path, void *async_arg)
 4024 {
 4025         struct async_node *cur_entry;
 4026 
 4027         cur_entry = SLIST_FIRST(async_head);
 4028         while (cur_entry != NULL) {
 4029                 struct async_node *next_entry;
 4030                 /*
 4031                  * Grab the next list entry before we call the current
 4032                  * entry's callback.  This is because the callback function
 4033                  * can delete its async callback entry.
 4034                  */
 4035                 next_entry = SLIST_NEXT(cur_entry, links);
 4036                 if ((cur_entry->event_enable & async_code) != 0)
 4037                         cur_entry->callback(cur_entry->callback_arg,
 4038                                             async_code, path,
 4039                                             async_arg);
 4040                 cur_entry = next_entry;
 4041         }
 4042 }
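
      /*
       * Annotation: the hand-rolled walk above is the deletion-safe list
       * traversal that <sys/queue.h> also provides as a macro; an
       * equivalent formulation would be
       *
       *      SLIST_FOREACH_SAFE(cur_entry, async_head, links, next_entry)
       *              if ((cur_entry->event_enable & async_code) != 0)
       *                      cur_entry->callback(cur_entry->callback_arg,
       *                          async_code, path, async_arg);
       *
       * The next pointer must be sampled first because a callback may
       * remove its own entry.
       */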
 4043 
 4044 static void
 4045 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
 4046                       struct cam_et *target, struct cam_ed *device,
 4047                       void *async_arg)
 4048 {
 4049         printf("%s called\n", __func__);
 4050 }
 4051 
 4052 u_int32_t
 4053 xpt_freeze_devq_rl(struct cam_path *path, cam_rl rl, u_int count)
 4054 {
 4055         struct cam_ed *dev = path->device;
 4056 
 4057         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4058         dev->sim->devq->alloc_openings +=
 4059             cam_ccbq_freeze(&dev->ccbq, rl, count);
 4060         /* Remove frozen device from allocq. */
 4061         if (device_is_alloc_queued(dev) &&
 4062             cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
 4063              CAMQ_GET_PRIO(&dev->drvq)))) {
 4064                 camq_remove(&dev->sim->devq->alloc_queue,
 4065                     dev->alloc_ccb_entry.pinfo.index);
 4066         }
 4067         /* Remove frozen device from sendq. */
 4068         if (device_is_send_queued(dev) &&
 4069             cam_ccbq_frozen_top(&dev->ccbq)) {
 4070                 camq_remove(&dev->sim->devq->send_queue,
 4071                     dev->send_ccb_entry.pinfo.index);
 4072         }
 4073         return (dev->ccbq.queue.qfrozen_cnt[rl]);
 4074 }
 4075 
 4076 u_int32_t
 4077 xpt_freeze_devq(struct cam_path *path, u_int count)
 4078 {
 4079 
 4080         return (xpt_freeze_devq_rl(path, 0, count));
 4081 }
 4082 
 4083 u_int32_t
 4084 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4085 {
 4086 
 4087         mtx_assert(sim->mtx, MA_OWNED);
 4088         sim->devq->send_queue.qfrozen_cnt[0] += count;
 4089         return (sim->devq->send_queue.qfrozen_cnt[0]);
 4090 }
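
      /*
       * Usage sketch (annotation, hypothetical caller): freezes are
       * counted and must be balanced by releases.  A SIM quiescing its
       * hardware might do
       *
       *      xpt_freeze_simq(sim, 1);
       *      ... reset the controller ...
       *      xpt_release_simq(sim, TRUE);
       *
       * while recovery scoped to one LUN pairs xpt_freeze_devq(path, 1)
       * with xpt_release_devq(path, 1, TRUE).
       */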
 4091 
 4092 static void
 4093 xpt_release_devq_timeout(void *arg)
 4094 {
 4095         struct cam_ed *device;
 4096 
 4097         device = (struct cam_ed *)arg;
 4098 
 4099         xpt_release_devq_device(device, /*rl*/0, /*count*/1, /*run_queue*/TRUE);
 4100 }
 4101 
 4102 void
 4103 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4104 {
 4105         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4106 
 4107         xpt_release_devq_device(path->device, /*rl*/0, count, run_queue);
 4108 }
 4109 
 4110 void
 4111 xpt_release_devq_rl(struct cam_path *path, cam_rl rl, u_int count, int run_queue)
 4112 {
 4113         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4114 
 4115         xpt_release_devq_device(path->device, rl, count, run_queue);
 4116 }
 4117 
 4118 static void
 4119 xpt_release_devq_device(struct cam_ed *dev, cam_rl rl, u_int count, int run_queue)
 4120 {
 4121 
 4122         if (count > dev->ccbq.queue.qfrozen_cnt[rl]) {
 4123 #ifdef INVARIANTS
 4124                 printf("xpt_release_devq(%d): requested %u > present %u\n",
 4125                     rl, count, dev->ccbq.queue.qfrozen_cnt[rl]);
 4126 #endif
 4127                 count = dev->ccbq.queue.qfrozen_cnt[rl];
 4128         }
 4129         dev->sim->devq->alloc_openings -=
 4130             cam_ccbq_release(&dev->ccbq, rl, count);
 4131         if (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
 4132             CAMQ_GET_PRIO(&dev->drvq))) == 0) {
 4133                 if (xpt_schedule_dev_allocq(dev->target->bus, dev))
 4134                         xpt_run_dev_allocq(dev->target->bus);
 4135         }
 4136         if (cam_ccbq_frozen_top(&dev->ccbq) == 0) {
 4137                 /*
 4138                  * No longer need to wait for a successful
 4139                  * command completion.
 4140                  */
 4141                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4142                 /*
 4143                  * Remove any timeouts that might be scheduled
 4144                  * to release this queue.
 4145                  */
 4146                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4147                         callout_stop(&dev->callout);
 4148                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4149                 }
 4150                 if (run_queue == 0)
 4151                         return;
 4152                 /*
 4153                  * Now that we are unfrozen schedule the
 4154                  * device so any pending transactions are
 4155                  * run.
 4156                  */
 4157                 if (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4158                         xpt_run_dev_sendq(dev->target->bus);
 4159         }
 4160 }
 4161 
 4162 void
 4163 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4164 {
 4165         struct  camq *sendq;
 4166 
 4167         mtx_assert(sim->mtx, MA_OWNED);
 4168         sendq = &(sim->devq->send_queue);
 4169         if (sendq->qfrozen_cnt[0] <= 0) {
 4170 #ifdef INVARIANTS
 4171                 printf("xpt_release_simq: requested 1 > present %u\n",
 4172                     sendq->qfrozen_cnt[0]);
 4173 #endif
 4174         } else
 4175                 sendq->qfrozen_cnt[0]--;
 4176         if (sendq->qfrozen_cnt[0] == 0) {
 4177                 /*
 4178                  * If there is a timeout scheduled to release this
 4179                  * sim queue, remove it.  The queue frozen count is
 4180                  * already at 0.
 4181                  */
 4182                 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4183                         callout_stop(&sim->callout);
 4184                         sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4185                 }
 4186                 if (run_queue) {
 4187                         struct cam_eb *bus;
 4188 
 4189                         /*
 4190                          * Now that we are unfrozen run the send queue.
 4191                          */
 4192                         bus = xpt_find_bus(sim->path_id);
 4193                         xpt_run_dev_sendq(bus);
 4194                         xpt_release_bus(bus);
 4195                 }
 4196         }
 4197 }
 4198 
 4199 /*
 4200  * XXX Appears to be unused.
 4201  */
 4202 static void
 4203 xpt_release_simq_timeout(void *arg)
 4204 {
 4205         struct cam_sim *sim;
 4206 
 4207         sim = (struct cam_sim *)arg;
 4208         xpt_release_simq(sim, /* run_queue */ TRUE);
 4209 }
 4210 
 4211 void
 4212 xpt_done(union ccb *done_ccb)
 4213 {
 4214         struct cam_sim *sim;
 4215         int     first;
 4216 
 4217         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4218         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4219                 /*
 4220                  * Queue up the request for handling by our SWI handler;
 4221                  * this covers any of the "non-immediate" types of ccbs.
 4222                  */
 4223                 sim = done_ccb->ccb_h.path->bus->sim;
 4224                 TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
 4225                     sim_links.tqe);
 4226                 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4227                 if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
 4228                         mtx_lock(&cam_simq_lock);
 4229                         first = TAILQ_EMPTY(&cam_simq);
 4230                         TAILQ_INSERT_TAIL(&cam_simq, sim, links);
 4231                         mtx_unlock(&cam_simq_lock);
 4232                         sim->flags |= CAM_SIM_ON_DONEQ;
 4233                         if (first)
 4234                                 swi_sched(cambio_ih, 0);
 4235                 }
 4236         }
 4237 }
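
      /*
       * Completion sketch (annotation): a SIM's interrupt handler hands a
       * finished transaction back through this function after filling in
       * the status, e.g.
       *
       *      ccb->ccb_h.status = CAM_REQ_CMP;
       *      xpt_done(ccb);
       *
       * The SWI then dequeues it in camisr_runqueue() below and invokes
       * the peripheral driver's cbfcnp callback.
       */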
 4238 
 4239 union ccb *
 4240 xpt_alloc_ccb(void)
 4241 {
 4242         union ccb *new_ccb;
 4243 
 4244         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK);
 4245         return (new_ccb);
 4246 }
 4247 
 4248 union ccb *
 4249 xpt_alloc_ccb_nowait(void)
 4250 {
 4251         union ccb *new_ccb;
 4252 
 4253         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT);
 4254         return (new_ccb);
 4255 }
 4256 
 4257 void
 4258 xpt_free_ccb(union ccb *free_ccb)
 4259 {
 4260         free(free_ccb, M_CAMXPT);
 4261 }
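
      /*
       * Allocation sketch (annotation): given an existing struct cam_path
       * *path, callers outside the per-device CCB cache pair these
       * helpers around xpt_action(), e.g. for an immediate path inquiry:
       *
       *      union ccb *ccb = xpt_alloc_ccb();
       *
       *      xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NORMAL);
       *      ccb->ccb_h.func_code = XPT_PATH_INQ;
       *      xpt_action(ccb);
       *      ... consume ccb->cpi ...
       *      xpt_free_ccb(ccb);
       *
       * XPT_PATH_INQ is not an XPT_FC_QUEUED function code, so
       * xpt_action() completes it synchronously.
       */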
 4262 
 4263 
 4264 
 4265 /* Private XPT functions */
 4266 
 4267 /*
 4268  * Get a CAM control block for the caller. Charge the structure to the device
 4269  * referenced by the path.  If this device has no 'credits' then the
 4270  * device already has the maximum number of outstanding operations under way
 4271  * and we return NULL. If we don't have sufficient resources to allocate more
 4272  * ccbs, we also return NULL.
 4273  */
 4274 static union ccb *
 4275 xpt_get_ccb(struct cam_ed *device)
 4276 {
 4277         union ccb *new_ccb;
 4278         struct cam_sim *sim;
 4279 
 4280         sim = device->sim;
 4281         if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
 4282                 new_ccb = xpt_alloc_ccb_nowait();
 4283                 if (new_ccb == NULL) {
 4284                         return (NULL);
 4285                 }
 4286                 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
 4287                         callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 4288                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
 4289                                   xpt_links.sle);
 4290                 sim->ccb_count++;
 4291         }
 4292         cam_ccbq_take_opening(&device->ccbq);
 4293         SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
 4294         return (new_ccb);
 4295 }
 4296 
 4297 static void
 4298 xpt_release_bus(struct cam_eb *bus)
 4299 {
 4300 
 4301         mtx_lock(&xsoftc.xpt_topo_lock);
 4302         KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
 4303         if ((--bus->refcount == 0)
 4304          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
 4305                 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
 4306                 xsoftc.bus_generation++;
 4307                 mtx_unlock(&xsoftc.xpt_topo_lock);
 4308                 cam_sim_release(bus->sim);
 4309                 free(bus, M_CAMXPT);
 4310         } else
 4311                 mtx_unlock(&xsoftc.xpt_topo_lock);
 4312 }
 4313 
 4314 static struct cam_et *
 4315 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4316 {
 4317         struct cam_et *target;
 4318 
 4319         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
 4320                                          M_NOWAIT|M_ZERO);
 4321         if (target != NULL) {
 4322                 struct cam_et *cur_target;
 4323 
 4324                 TAILQ_INIT(&target->ed_entries);
 4325                 target->bus = bus;
 4326                 target->target_id = target_id;
 4327                 target->refcount = 1;
 4328                 target->generation = 0;
 4329                 target->luns = NULL;
 4330                 timevalclear(&target->last_reset);
 4331                 /*
 4332                  * Hold a reference to our parent bus so it
 4333                  * will not go away before we do.
 4334                  */
 4335                 mtx_lock(&xsoftc.xpt_topo_lock);
 4336                 bus->refcount++;
 4337                 mtx_unlock(&xsoftc.xpt_topo_lock);
 4338 
 4339                 /* Insertion sort into our bus's target list */
 4340                 cur_target = TAILQ_FIRST(&bus->et_entries);
 4341                 while (cur_target != NULL && cur_target->target_id < target_id)
 4342                         cur_target = TAILQ_NEXT(cur_target, links);
 4343 
 4344                 if (cur_target != NULL) {
 4345                         TAILQ_INSERT_BEFORE(cur_target, target, links);
 4346                 } else {
 4347                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4348                 }
 4349                 bus->generation++;
 4350         }
 4351         return (target);
 4352 }
 4353 
 4354 static void
 4355 xpt_release_target(struct cam_et *target)
 4356 {
 4357 
 4358         if (target->refcount == 1) {
 4359                 if (TAILQ_FIRST(&target->ed_entries) == NULL) {
 4360                         TAILQ_REMOVE(&target->bus->et_entries, target, links);
 4361                         target->bus->generation++;
 4362                         xpt_release_bus(target->bus);
 4363                         if (target->luns)
 4364                                 free(target->luns, M_CAMXPT);
 4365                         free(target, M_CAMXPT);
 4366                 }
 4367         } else
 4368                 target->refcount--;
 4369 }
 4370 
 4371 static struct cam_ed *
 4372 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
 4373                          lun_id_t lun_id)
 4374 {
 4375         struct cam_ed *device, *cur_device;
 4376 
 4377         device = xpt_alloc_device(bus, target, lun_id);
 4378         if (device == NULL)
 4379                 return (NULL);
 4380 
 4381         device->mintags = 1;
 4382         device->maxtags = 1;
 4383         bus->sim->max_ccbs += device->ccbq.devq_openings;
 4384         cur_device = TAILQ_FIRST(&target->ed_entries);
 4385         while (cur_device != NULL && cur_device->lun_id < lun_id)
 4386                 cur_device = TAILQ_NEXT(cur_device, links);
 4387         if (cur_device != NULL) {
 4388                 TAILQ_INSERT_BEFORE(cur_device, device, links);
 4389         } else {
 4390                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 4391         }
 4392         target->generation++;
 4393 
 4394         return (device);
 4395 }
 4396 
 4397 struct cam_ed *
 4398 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4399 {
 4400         struct     cam_ed *device;
 4401         struct     cam_devq *devq;
 4402         cam_status status;
 4403 
 4404         /* Make space for us in the device queue on our bus */
 4405         devq = bus->sim->devq;
 4406         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 4407 
 4408         if (status != CAM_REQ_CMP) {
 4409                 device = NULL;
 4410         } else {
 4411                 device = (struct cam_ed *)malloc(sizeof(*device),
 4412                                                  M_CAMXPT, M_NOWAIT|M_ZERO);
 4413         }
 4414 
 4415         if (device != NULL) {
 4416                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 4417                 device->alloc_ccb_entry.device = device;
 4418                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
 4419                 device->send_ccb_entry.device = device;
 4420                 device->target = target;
 4421                 device->lun_id = lun_id;
 4422                 device->sim = bus->sim;
 4423                 /* Initialize our queues */
 4424                 if (camq_init(&device->drvq, 0) != 0) {
 4425                         free(device, M_CAMXPT);
 4426                         return (NULL);
 4427                 }
 4428                 if (cam_ccbq_init(&device->ccbq,
 4429                                   bus->sim->max_dev_openings) != 0) {
 4430                         camq_fini(&device->drvq);
 4431                         free(device, M_CAMXPT);
 4432                         return (NULL);
 4433                 }
 4434                 SLIST_INIT(&device->asyncs);
 4435                 SLIST_INIT(&device->periphs);
 4436                 device->generation = 0;
 4437                 device->owner = NULL;
 4438                 device->flags = CAM_DEV_UNCONFIGURED;
 4439                 device->tag_delay_count = 0;
 4440                 device->tag_saved_openings = 0;
 4441                 device->refcount = 1;
 4442                 callout_init_mtx(&device->callout, bus->sim->mtx, 0);
 4443 
 4444                 /*
 4445                  * Hold a reference to our parent target so it
 4446                  * will not go away before we do.
 4447                  */
 4448                 target->refcount++;
 4449 
 4450         }
 4451         return (device);
 4452 }
 4453 
 4454 void
 4455 xpt_acquire_device(struct cam_ed *device)
 4456 {
 4457 
 4458         device->refcount++;
 4459 }
 4460 
 4461 void
 4462 xpt_release_device(struct cam_ed *device)
 4463 {
 4464 
 4465         if (device->refcount == 1) {
 4466                 struct cam_devq *devq;
 4467 
 4468                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 4469                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 4470                         panic("Removing device while still queued for ccbs");
 4471 
 4472                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 4473                         callout_stop(&device->callout);
 4474 
 4475                 TAILQ_REMOVE(&device->target->ed_entries, device, links);
 4476                 device->target->generation++;
 4477                 device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings;
 4478                 /* Release our slot in the devq */
 4479                 devq = device->target->bus->sim->devq;
 4480                 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 4481                 camq_fini(&device->drvq);
 4482                 cam_ccbq_fini(&device->ccbq);
 4483                 xpt_release_target(device->target);
 4484                 free(device, M_CAMXPT);
 4485         } else
 4486                 device->refcount--;
 4487 }
 4488 
 4489 u_int32_t
 4490 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 4491 {
 4492         int     diff;
 4493         int     result;
 4494         struct  cam_ed *dev;
 4495 
 4496         dev = path->device;
 4497 
 4498         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 4499         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 4500         if (result == CAM_REQ_CMP && (diff < 0)) {
 4501                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 4502         }
 4503         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4504          || (dev->inq_flags & SID_CmdQue) != 0)
 4505                 dev->tag_saved_openings = newopenings;
 4506         /* Adjust the global limit */
 4507         dev->sim->max_ccbs += diff;
 4508         return (result);
 4509 }
 4510 
 4511 static struct cam_eb *
 4512 xpt_find_bus(path_id_t path_id)
 4513 {
 4514         struct cam_eb *bus;
 4515 
 4516         mtx_lock(&xsoftc.xpt_topo_lock);
 4517         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4518              bus != NULL;
 4519              bus = TAILQ_NEXT(bus, links)) {
 4520                 if (bus->path_id == path_id) {
 4521                         bus->refcount++;
 4522                         break;
 4523                 }
 4524         }
 4525         mtx_unlock(&xsoftc.xpt_topo_lock);
 4526         return (bus);
 4527 }
 4528 
 4529 static struct cam_et *
 4530 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 4531 {
 4532         struct cam_et *target;
 4533 
 4534         for (target = TAILQ_FIRST(&bus->et_entries);
 4535              target != NULL;
 4536              target = TAILQ_NEXT(target, links)) {
 4537                 if (target->target_id == target_id) {
 4538                         target->refcount++;
 4539                         break;
 4540                 }
 4541         }
 4542         return (target);
 4543 }
 4544 
 4545 static struct cam_ed *
 4546 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 4547 {
 4548         struct cam_ed *device;
 4549 
 4550         for (device = TAILQ_FIRST(&target->ed_entries);
 4551              device != NULL;
 4552              device = TAILQ_NEXT(device, links)) {
 4553                 if (device->lun_id == lun_id) {
 4554                         device->refcount++;
 4555                         break;
 4556                 }
 4557         }
 4558         return (device);
 4559 }
 4560 
 4561 void
 4562 xpt_start_tags(struct cam_path *path)
 4563 {
 4564         struct ccb_relsim crs;
 4565         struct cam_ed *device;
 4566         struct cam_sim *sim;
 4567         int    newopenings;
 4568 
 4569         device = path->device;
 4570         sim = path->bus->sim;
 4571         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4572         xpt_freeze_devq(path, /*count*/1);
 4573         device->inq_flags |= SID_CmdQue;
 4574         if (device->tag_saved_openings != 0)
 4575                 newopenings = device->tag_saved_openings;
 4576         else
 4577                 newopenings = min(device->maxtags,
 4578                                   sim->max_tagged_dev_openings);
 4579         xpt_dev_ccbq_resize(path, newopenings);
 4580         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4581         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4582         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4583         crs.openings
 4584             = crs.release_timeout
 4585             = crs.qfrozen_cnt
 4586             = 0;
 4587         xpt_action((union ccb *)&crs);
 4588 }
 4589 
 4590 void
 4591 xpt_stop_tags(struct cam_path *path)
 4592 {
 4593         struct ccb_relsim crs;
 4594         struct cam_ed *device;
 4595         struct cam_sim *sim;
 4596 
 4597         device = path->device;
 4598         sim = path->bus->sim;
 4599         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4600         device->tag_delay_count = 0;
 4601         xpt_freeze_devq(path, /*count*/1);
 4602         device->inq_flags &= ~SID_CmdQue;
 4603         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
 4604         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4605         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4606         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4607         crs.openings
 4608             = crs.release_timeout
 4609             = crs.qfrozen_cnt
 4610             = 0;
 4611         xpt_action((union ccb *)&crs);
 4612 }
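
      /*
       * Annotation: xpt_start_tags() and xpt_stop_tags() share a
       * freeze-and-drain idiom: the device queue is frozen so nothing new
       * is dispatched while openings are resized, and the XPT_REL_SIMQ
       * request with RELSIM_RELEASE_AFTER_QEMPTY arranges for that freeze
       * to be dropped once the outstanding commands drain, instead of
       * releasing it here directly.
       */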
 4613 
 4614 static void
 4615 xpt_boot_delay(void *arg)
 4616 {
 4617 
 4618         xpt_release_boot();
 4619 }
 4620 
 4621 static void
 4622 xpt_config(void *arg)
 4623 {
 4624         /*
 4625          * Now that interrupts are enabled, go find our devices
 4626          */
 4627 
 4628 #ifdef CAMDEBUG
 4629         /* Setup debugging flags and path */
 4630 #ifdef CAM_DEBUG_BUS
 4631         if (cam_dflags != CAM_DEBUG_NONE) {
 4632                 /*
 4633                  * Locking is specifically omitted here.  No SIMs have
 4634                  * registered yet, so xpt_create_path will only be searching
 4635                  * empty lists of targets and devices.
 4636                  */
 4637                 if (xpt_create_path(&cam_dpath, xpt_periph,
 4638                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 4639                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 4640                         printf("xpt_config: xpt_create_path() failed for debug"
 4641                                " target %d:%d:%d, debugging disabled\n",
 4642                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 4643                         cam_dflags = CAM_DEBUG_NONE;
 4644                 }
 4645         } else
 4646                 cam_dpath = NULL;
 4647 #else /* !CAM_DEBUG_BUS */
 4648         cam_dpath = NULL;
 4649 #endif /* CAM_DEBUG_BUS */
 4650 #endif /* CAMDEBUG */
 4651 
 4652         periphdriver_init(1);
 4653         xpt_hold_boot();
 4654         callout_init(&xsoftc.boot_callout, 1);
 4655         callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
 4656             xpt_boot_delay, NULL);
 4657         /* Fire up rescan thread. */
 4658         if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
 4659                 printf("xpt_config: failed to create rescan thread.\n");
 4660         }
 4661 }
 4662 
 4663 void
 4664 xpt_hold_boot(void)
 4665 {
 4666         xpt_lock_buses();
 4667         xsoftc.buses_to_config++;
 4668         xpt_unlock_buses();
 4669 }
 4670 
 4671 void
 4672 xpt_release_boot(void)
 4673 {
 4674         xpt_lock_buses();
 4675         xsoftc.buses_to_config--;
 4676         if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
 4677                 struct  xpt_task *task;
 4678 
 4679                 xsoftc.buses_config_done = 1;
 4680                 xpt_unlock_buses();
 4681                 /* Call manually because we don't have any busses */
 4682                 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
 4683                 if (task != NULL) {
 4684                         TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
 4685                         taskqueue_enqueue(taskqueue_thread, &task->task);
 4686                 }
 4687         } else
 4688                 xpt_unlock_buses();
 4689 }
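
      /*
       * Annotation: xpt_hold_boot()/xpt_release_boot() form a counting
       * gate.  xpt_config() takes one hold and arms a callout so the
       * count cannot reach zero before xsoftc.boot_delay milliseconds
       * elapse; each bus being scanned holds its own reference, and the
       * final release queues xpt_finishconfig_task() exactly once,
       * guarded by xsoftc.buses_config_done.
       */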
 4690 
 4691 /*
 4692  * If the given device only has one peripheral attached to it, and if that
 4693  * peripheral is the passthrough driver, announce it.  This ensures that the
 4694  * user sees some sort of announcement for every peripheral in their system.
 4695  */
 4696 static int
 4697 xptpassannouncefunc(struct cam_ed *device, void *arg)
 4698 {
 4699         struct cam_periph *periph;
 4700         int i;
 4701 
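              /* Count the peripherals attached to this device. */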
 4702         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 4703              periph = SLIST_NEXT(periph, periph_links), i++);
 4704 
 4705         periph = SLIST_FIRST(&device->periphs);
 4706         if ((i == 1)
 4707          && (strncmp(periph->periph_name, "pass", 4) == 0))
 4708                 xpt_announce_periph(periph, NULL);
 4709 
 4710         return(1);
 4711 }
 4712 
 4713 static void
 4714 xpt_finishconfig_task(void *context, int pending)
 4715 {
 4716 
 4717         periphdriver_init(2);
 4718         /*
 4719          * Check for devices with no "standard" peripheral driver
 4720          * attached.  For any devices like that, announce the
 4721          * passthrough driver so the user will see something.
 4722          */
 4723         xpt_for_all_devices(xptpassannouncefunc, NULL);
 4724 
 4725         /* Release our hook so that the boot can continue. */
 4726         config_intrhook_disestablish(xsoftc.xpt_config_hook);
 4727         free(xsoftc.xpt_config_hook, M_CAMXPT);
 4728         xsoftc.xpt_config_hook = NULL;
 4729 
 4730         free(context, M_CAMXPT);
 4731 }
 4732 
 4733 cam_status
 4734 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
 4735                    struct cam_path *path)
 4736 {
 4737         struct ccb_setasync csa;
 4738         cam_status status;
 4739         int xptpath = 0;
 4740 
 4741         if (path == NULL) {
 4742                 mtx_lock(&xsoftc.xpt_lock);
 4743                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
 4744                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4745                 if (status != CAM_REQ_CMP) {
 4746                         mtx_unlock(&xsoftc.xpt_lock);
 4747                         return (status);
 4748                 }
 4749                 xptpath = 1;
 4750         }
 4751 
 4752         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 4753         csa.ccb_h.func_code = XPT_SASYNC_CB;
 4754         csa.event_enable = event;
 4755         csa.callback = cbfunc;
 4756         csa.callback_arg = cbarg;
 4757         xpt_action((union ccb *)&csa);
 4758         status = csa.ccb_h.status;
 4759 
 4760         if (xptpath) {
 4761                 xpt_free_path(path);
 4762                 mtx_unlock(&xsoftc.xpt_lock);
 4763         }
 4764 
 4765         if ((status == CAM_REQ_CMP) &&
 4766             (csa.event_enable & AC_FOUND_DEVICE)) {
 4767                 /*
 4768                  * Get this peripheral up to date with all
 4769                  * the currently existing devices.
 4770                  */
 4771                 xpt_for_all_devices(xptsetasyncfunc, &csa);
 4772         }
 4773         if ((status == CAM_REQ_CMP) &&
 4774             (csa.event_enable & AC_PATH_REGISTERED)) {
 4775                 /*
 4776                  * Get this peripheral up to date with all
 4777                  * the currently existing busses.
 4778                  */
 4779                 xpt_for_all_busses(xptsetasyncbusfunc, &csa);
 4780         }
 4781 
 4782         return (status);
 4783 }
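
      /*
       * Registration sketch (annotation, hypothetical names): a
       * peripheral wanting device-departure notification supplies an
       * ac_callback_t:
       *
       *      static void
       *      mydrv_async(void *cbarg, u_int32_t code, struct cam_path *path,
       *          void *arg)
       *      {
       *              if (code == AC_LOST_DEVICE)
       *                      ... tear down state hanging off cbarg ...
       *      }
       *
       *      xpt_register_async(AC_LOST_DEVICE, mydrv_async, softc, path);
       */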
 4784 
 4785 static void
 4786 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 4787 {
 4788         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 4789 
 4790         switch (work_ccb->ccb_h.func_code) {
 4791         /* Common cases first */
 4792         case XPT_PATH_INQ:              /* Path routing inquiry */
 4793         {
 4794                 struct ccb_pathinq *cpi;
 4795 
 4796                 cpi = &work_ccb->cpi;
 4797                 cpi->version_num = 1; /* XXX??? */
 4798                 cpi->hba_inquiry = 0;
 4799                 cpi->target_sprt = 0;
 4800                 cpi->hba_misc = 0;
 4801                 cpi->hba_eng_cnt = 0;
 4802                 cpi->max_target = 0;
 4803                 cpi->max_lun = 0;
 4804                 cpi->initiator_id = 0;
 4805                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 4806                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
 4807                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 4808                 cpi->unit_number = sim->unit_number;
 4809                 cpi->bus_id = sim->bus_id;
 4810                 cpi->base_transfer_speed = 0;
 4811                 cpi->protocol = PROTO_UNSPECIFIED;
 4812                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
 4813                 cpi->transport = XPORT_UNSPECIFIED;
 4814                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
 4815                 cpi->ccb_h.status = CAM_REQ_CMP;
 4816                 xpt_done(work_ccb);
 4817                 break;
 4818         }
 4819         default:
 4820                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 4821                 xpt_done(work_ccb);
 4822                 break;
 4823         }
 4824 }
 4825 
 4826 /*
 4827  * The xpt as a "controller" has no interrupt sources, so polling
 4828  * is a no-op.
 4829  */
 4830 static void
 4831 xptpoll(struct cam_sim *sim)
 4832 {
 4833 }
 4834 
 4835 void
 4836 xpt_lock_buses(void)
 4837 {
 4838         mtx_lock(&xsoftc.xpt_topo_lock);
 4839 }
 4840 
 4841 void
 4842 xpt_unlock_buses(void)
 4843 {
 4844         mtx_unlock(&xsoftc.xpt_topo_lock);
 4845 }
 4846 
 4847 static void
 4848 camisr(void *dummy)
 4849 {
 4850         cam_simq_t queue;
 4851         struct cam_sim *sim;
 4852 
 4853         mtx_lock(&cam_simq_lock);
 4854         TAILQ_INIT(&queue);
 4855         while (!TAILQ_EMPTY(&cam_simq)) {
 4856                 TAILQ_CONCAT(&queue, &cam_simq, links);
 4857                 mtx_unlock(&cam_simq_lock);
 4858 
 4859                 while ((sim = TAILQ_FIRST(&queue)) != NULL) {
 4860                         TAILQ_REMOVE(&queue, sim, links);
 4861                         CAM_SIM_LOCK(sim);
 4862                         sim->flags &= ~CAM_SIM_ON_DONEQ;
 4863                         camisr_runqueue(&sim->sim_doneq);
 4864                         CAM_SIM_UNLOCK(sim);
 4865                 }
 4866                 mtx_lock(&cam_simq_lock);
 4867         }
 4868         mtx_unlock(&cam_simq_lock);
 4869 }
 4870 
 4871 static void
 4872 camisr_runqueue(void *V_queue)
 4873 {
 4874         cam_isrq_t *queue = V_queue;
 4875         struct  ccb_hdr *ccb_h;
 4876 
 4877         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
 4878                 int     runq;
 4879 
 4880                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
 4881                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 4882 
 4883                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
 4884                           ("camisr\n"));
 4885 
 4886                 runq = FALSE;
 4887 
 4888                 if (ccb_h->flags & CAM_HIGH_POWER) {
 4889                         struct highpowerlist    *hphead;
 4890                         union ccb               *send_ccb;
 4891 
 4892                         mtx_lock(&xsoftc.xpt_lock);
 4893                         hphead = &xsoftc.highpowerq;
 4894 
 4895                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
 4896 
 4897                         /*
 4898                          * Increment the count since this command is done.
 4899                          */
 4900                         xsoftc.num_highpower++;
 4901 
 4902                         /*
 4903                          * Any high powered commands queued up?
 4904                          */
 4905                         if (send_ccb != NULL) {
 4906 
 4907                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
 4908                                 mtx_unlock(&xsoftc.xpt_lock);
 4909 
 4910                                 xpt_release_devq(send_ccb->ccb_h.path,
 4911                                                  /*count*/1, /*runqueue*/TRUE);
 4912                         } else
 4913                                 mtx_unlock(&xsoftc.xpt_lock);
 4914                 }
 4915 
 4916                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 4917                         struct cam_ed *dev;
 4918 
 4919                         dev = ccb_h->path->device;
 4920 
 4921                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 4922                         ccb_h->path->bus->sim->devq->send_active--;
 4923                         ccb_h->path->bus->sim->devq->send_openings++;
 4924                         runq = TRUE;
 4925 
 4926                         if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 4927                           && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
 4928                          || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 4929                           && (dev->ccbq.dev_active == 0))) {
 4930                                 xpt_release_devq(ccb_h->path, /*count*/1,
 4931                                                  /*run_queue*/FALSE);
 4932                         }
 4933 
 4934                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4935                          && (--dev->tag_delay_count == 0))
 4936                                 xpt_start_tags(ccb_h->path);
 4937                         if (!device_is_send_queued(dev)) {
 4938                                 (void)xpt_schedule_dev_sendq(ccb_h->path->bus, 
 4939                                                              dev);
 4940                         }
 4941                 }
 4942 
 4943                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
 4944                         xpt_release_simq(ccb_h->path->bus->sim,
 4945                                          /*run_queue*/TRUE);
 4946                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
 4947                         runq = FALSE;
 4948                 }
 4949 
 4950                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 4951                  && (ccb_h->status & CAM_DEV_QFRZN)) {
 4952                         xpt_release_devq(ccb_h->path, /*count*/1,
 4953                                          /*run_queue*/TRUE);
 4954                         ccb_h->status &= ~CAM_DEV_QFRZN;
 4955                 } else if (runq) {
 4956                         xpt_run_dev_sendq(ccb_h->path->bus);
 4957                 }
 4958 
 4959                 /* Call the peripheral driver's callback */
 4960                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 4961         }
 4962 }
