FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c

    1 /*-
    2  * Implementation of the Common Access Method Transport (XPT) layer.
    3  *
    4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
    5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/11.2/sys/cam/cam_xpt.c 330926 2018-03-14 09:57:58Z tijl $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/systm.h>
   36 #include <sys/types.h>
   37 #include <sys/malloc.h>
   38 #include <sys/kernel.h>
   39 #include <sys/time.h>
   40 #include <sys/conf.h>
   41 #include <sys/fcntl.h>
   42 #include <sys/interrupt.h>
   43 #include <sys/proc.h>
   44 #include <sys/sbuf.h>
   45 #include <sys/smp.h>
   46 #include <sys/taskqueue.h>
   47 
   48 #include <sys/lock.h>
   49 #include <sys/mutex.h>
   50 #include <sys/sysctl.h>
   51 #include <sys/kthread.h>
   52 
   53 #include <cam/cam.h>
   54 #include <cam/cam_ccb.h>
   55 #include <cam/cam_periph.h>
   56 #include <cam/cam_queue.h>
   57 #include <cam/cam_sim.h>
   58 #include <cam/cam_xpt.h>
   59 #include <cam/cam_xpt_sim.h>
   60 #include <cam/cam_xpt_periph.h>
   61 #include <cam/cam_xpt_internal.h>
   62 #include <cam/cam_debug.h>
   63 #include <cam/cam_compat.h>
   64 
   65 #include <cam/scsi/scsi_all.h>
   66 #include <cam/scsi/scsi_message.h>
   67 #include <cam/scsi/scsi_pass.h>
   68 
   69 #include <machine/md_var.h>     /* geometry translation */
   70 #include <machine/stdarg.h>     /* for xpt_print below */
   71 
   72 #include "opt_cam.h"
   73 
   74 /*
   75  * This is the maximum number of high powered commands (e.g. start unit)
   76  * that can be outstanding at a particular time.
   77  */
   78 #ifndef CAM_MAX_HIGHPOWER
   79 #define CAM_MAX_HIGHPOWER  4
   80 #endif
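      /*
       * A hedged build-time sketch: opt_cam.h is included below, so a
       * kernel config could raise this limit if CAM_MAX_HIGHPOWER is
       * registered as an option in this tree (the value is illustrative):
       *
       *      options CAM_MAX_HIGHPOWER=8
       */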
   81 
   82 /* Data structures internal to the xpt layer */
   83 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
   84 MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
   85 MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
   86 MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");
   87 
   88 /* Object for deferring XPT actions to a taskqueue */
   89 struct xpt_task {
   90         struct task     task;
   91         void            *data1;
   92         uintptr_t       data2;
   93 };
   94 
   95 struct xpt_softc {
   96         uint32_t                xpt_generation;
   97 
   98         /* number of high powered commands that can go through right now */
   99         struct mtx              xpt_highpower_lock;
  100         STAILQ_HEAD(highpowerlist, cam_ed)      highpowerq;
  101         int                     num_highpower;
  102 
  103         /* queue for handling async rescan requests. */
  104         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
  105         int buses_to_config;
  106         int buses_config_done;
  107 
  108         /* Registered busses */
  109         TAILQ_HEAD(,cam_eb)     xpt_busses;
  110         u_int                   bus_generation;
  111 
  112         struct intr_config_hook *xpt_config_hook;
  113 
  114         int                     boot_delay;
  115         struct callout          boot_callout;
  116 
  117         struct mtx              xpt_topo_lock;
  118         struct mtx              xpt_lock;
  119         struct taskqueue        *xpt_taskq;
  120 };
  121 
  122 typedef enum {
  123         DM_RET_COPY             = 0x01,
  124         DM_RET_FLAG_MASK        = 0x0f,
  125         DM_RET_NONE             = 0x00,
  126         DM_RET_STOP             = 0x10,
  127         DM_RET_DESCEND          = 0x20,
  128         DM_RET_ERROR            = 0x30,
  129         DM_RET_ACTION_MASK      = 0xf0
  130 } dev_match_ret;
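      /*
       * Callers below test the low nibble as independent flags and the
       * high nibble as a single action code, e.g.:
       *
       *      if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) ...
       *      if ((retval & DM_RET_COPY) != 0) ...
       */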
  131 
  132 typedef enum {
  133         XPT_DEPTH_BUS,
  134         XPT_DEPTH_TARGET,
  135         XPT_DEPTH_DEVICE,
  136         XPT_DEPTH_PERIPH
  137 } xpt_traverse_depth;
  138 
  139 struct xpt_traverse_config {
  140         xpt_traverse_depth      depth;
  141         void                    *tr_func;
  142         void                    *tr_arg;
  143 };
  144 
  145 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
  146 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
  147 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
  148 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
  149 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
  150 
  151 /* Transport layer configuration information */
  152 static struct xpt_softc xsoftc;
  153 
  154 MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);
  155 
  156 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
  157            &xsoftc.boot_delay, 0, "Bus registration wait time");
  158 SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
  159             &xsoftc.xpt_generation, 0, "CAM peripheral generation count");
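      /*
       * A hedged usage sketch: boot_delay is a boot-time tunable
       * (CTLFLAG_RDTUN), so it would normally be set from loader.conf;
       * the value is illustrative:
       *
       *      kern.cam.boot_delay="10000"
       */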
  160 
  161 struct cam_doneq {
  162         struct mtx_padalign     cam_doneq_mtx;
  163         STAILQ_HEAD(, ccb_hdr)  cam_doneq;
  164         int                     cam_doneq_sleep;
  165 };
  166 
  167 static struct cam_doneq cam_doneqs[MAXCPU];
  168 static int cam_num_doneqs;
  169 static struct proc *cam_proc;
  170 
  171 SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
  172            &cam_num_doneqs, 0, "Number of completion queues/threads");
  173 
  174 struct cam_periph *xpt_periph;
  175 
  176 static periph_init_t xpt_periph_init;
  177 
  178 static struct periph_driver xpt_driver =
  179 {
  180         xpt_periph_init, "xpt",
  181         TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
  182         CAM_PERIPH_DRV_EARLY
  183 };
  184 
  185 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
  186 
  187 static d_open_t xptopen;
  188 static d_close_t xptclose;
  189 static d_ioctl_t xptioctl;
  190 static d_ioctl_t xptdoioctl;
  191 
  192 static struct cdevsw xpt_cdevsw = {
  193         .d_version =    D_VERSION,
  194         .d_flags =      0,
  195         .d_open =       xptopen,
  196         .d_close =      xptclose,
  197         .d_ioctl =      xptioctl,
  198         .d_name =       "xpt",
  199 };
  200 
  201 /* Storage for debugging data structures */
  202 struct cam_path *cam_dpath;
  203 u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
  204 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
  205         &cam_dflags, 0, "Enabled debug flags");
  206 u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
  207 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
  208         &cam_debug_delay, 0, "Delay in us after each debug message");
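      /*
       * A hedged usage sketch: both knobs are writable at run time and
       * settable as tunables (CTLFLAG_RWTUN).  The flag values live in
       * cam_debug.h, so the one below is illustrative only:
       *
       *      sysctl kern.cam.dflags=0x1
       *      sysctl kern.cam.debug_delay=1000
       */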
  209 
  210 /* Our boot-time initialization hook */
  211 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
  212 
  213 static moduledata_t cam_moduledata = {
  214         "cam",
  215         cam_module_event_handler,
  216         NULL
  217 };
  218 
  219 static int      xpt_init(void *);
  220 
  221 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
  222 MODULE_VERSION(cam, 1);
  223 
  224 
  225 static void             xpt_async_bcast(struct async_list *async_head,
  226                                         u_int32_t async_code,
  227                                         struct cam_path *path,
  228                                         void *async_arg);
  229 static path_id_t xptnextfreepathid(void);
  230 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
  231 static union ccb *xpt_get_ccb(struct cam_periph *periph);
  232 static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
  233 static void      xpt_run_allocq(struct cam_periph *periph, int sleep);
  234 static void      xpt_run_allocq_task(void *context, int pending);
  235 static void      xpt_run_devq(struct cam_devq *devq);
  236 static timeout_t xpt_release_devq_timeout;
  237 static void      xpt_release_simq_timeout(void *arg) __unused;
  238 static void      xpt_acquire_bus(struct cam_eb *bus);
  239 static void      xpt_release_bus(struct cam_eb *bus);
  240 static uint32_t  xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
  241 static int       xpt_release_devq_device(struct cam_ed *dev, u_int count,
  242                     int run_queue);
  243 static struct cam_et*
  244                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
  245 static void      xpt_acquire_target(struct cam_et *target);
  246 static void      xpt_release_target(struct cam_et *target);
  247 static struct cam_eb*
  248                  xpt_find_bus(path_id_t path_id);
  249 static struct cam_et*
  250                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
  251 static struct cam_ed*
  252                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
  253 static void      xpt_config(void *arg);
  254 static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
  255                                  u_int32_t new_priority);
  256 static xpt_devicefunc_t xptpassannouncefunc;
  257 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
  258 static void      xptpoll(struct cam_sim *sim);
  259 static void      camisr_runqueue(void);
  260 static void      xpt_done_process(struct ccb_hdr *ccb_h);
  261 static void      xpt_done_td(void *);
  262 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
  263                                     u_int num_patterns, struct cam_eb *bus);
  264 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
  265                                        u_int num_patterns,
  266                                        struct cam_ed *device);
  267 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
  268                                        u_int num_patterns,
  269                                        struct cam_periph *periph);
  270 static xpt_busfunc_t    xptedtbusfunc;
  271 static xpt_targetfunc_t xptedttargetfunc;
  272 static xpt_devicefunc_t xptedtdevicefunc;
  273 static xpt_periphfunc_t xptedtperiphfunc;
  274 static xpt_pdrvfunc_t   xptplistpdrvfunc;
  275 static xpt_periphfunc_t xptplistperiphfunc;
  276 static int              xptedtmatch(struct ccb_dev_match *cdm);
  277 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
  278 static int              xptbustraverse(struct cam_eb *start_bus,
  279                                        xpt_busfunc_t *tr_func, void *arg);
  280 static int              xpttargettraverse(struct cam_eb *bus,
  281                                           struct cam_et *start_target,
  282                                           xpt_targetfunc_t *tr_func, void *arg);
  283 static int              xptdevicetraverse(struct cam_et *target,
  284                                           struct cam_ed *start_device,
  285                                           xpt_devicefunc_t *tr_func, void *arg);
  286 static int              xptperiphtraverse(struct cam_ed *device,
  287                                           struct cam_periph *start_periph,
  288                                           xpt_periphfunc_t *tr_func, void *arg);
  289 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
  290                                         xpt_pdrvfunc_t *tr_func, void *arg);
  291 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
  292                                             struct cam_periph *start_periph,
  293                                             xpt_periphfunc_t *tr_func,
  294                                             void *arg);
  295 static xpt_busfunc_t    xptdefbusfunc;
  296 static xpt_targetfunc_t xptdeftargetfunc;
  297 static xpt_devicefunc_t xptdefdevicefunc;
  298 static xpt_periphfunc_t xptdefperiphfunc;
  299 static void             xpt_finishconfig_task(void *context, int pending);
  300 static void             xpt_dev_async_default(u_int32_t async_code,
  301                                               struct cam_eb *bus,
  302                                               struct cam_et *target,
  303                                               struct cam_ed *device,
  304                                               void *async_arg);
  305 static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
  306                                                  struct cam_et *target,
  307                                                  lun_id_t lun_id);
  308 static xpt_devicefunc_t xptsetasyncfunc;
  309 static xpt_busfunc_t    xptsetasyncbusfunc;
  310 static cam_status       xptregister(struct cam_periph *periph,
  311                                     void *arg);
  312 static const char *     xpt_action_name(uint32_t action);
  313 static __inline int device_is_queued(struct cam_ed *device);
  314 
  315 static __inline int
  316 xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
  317 {
  318         int     retval;
  319 
  320         mtx_assert(&devq->send_mtx, MA_OWNED);
  321         if ((dev->ccbq.queue.entries > 0) &&
  322             (dev->ccbq.dev_openings > 0) &&
  323             (dev->ccbq.queue.qfrozen_cnt == 0)) {
  324                 /*
  325                  * The priority of a device waiting for controller
  326                  * resources is that of the highest priority CCB
  327                  * enqueued.
  328                  */
  329                 retval =
  330                     xpt_schedule_dev(&devq->send_queue,
  331                                      &dev->devq_entry,
  332                                      CAMQ_GET_PRIO(&dev->ccbq.queue));
  333         } else {
  334                 retval = 0;
  335         }
  336         return (retval);
  337 }
  338 
  339 static __inline int
  340 device_is_queued(struct cam_ed *device)
  341 {
  342         return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
  343 }
  344 
  345 static void
  346 xpt_periph_init()
  347 {
  348         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
  349 }
  350 
  351 static int
  352 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
  353 {
  354 
  355         /*
  356          * Only allow read-write access.
  357          */
  358         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
  359                 return(EPERM);
  360 
  361         /*
  362          * We don't allow nonblocking access.
  363          */
  364         if ((flags & O_NONBLOCK) != 0) {
  365                 printf("%s: can't do nonblocking access\n", devtoname(dev));
  366                 return(ENODEV);
  367         }
  368 
  369         return(0);
  370 }
  371 
  372 static int
  373 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
  374 {
  375 
  376         return(0);
  377 }
  378 
  379 /*
  380  * Don't automatically grab the xpt softc lock here even though this is going
  381  * through the xpt device.  The xpt device is really just a back door for
  382  * accessing other devices and SIMs, so the right thing to do is to grab
  383  * the appropriate SIM lock once the bus/SIM is located.
  384  */
  385 static int
  386 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
  387 {
  388         int error;
  389 
  390         if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
  391                 error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
  392         }
  393         return (error);
  394 }
  395
  396 static int
  397 xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
  398 {
  399         int error;
  400 
  401         error = 0;
  402 
  403         switch(cmd) {
  404         /*
  405          * For the transport layer CAMIOCOMMAND ioctl, we really only want
  406          * to accept CCB types that don't quite make sense to send through a
  407          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
  408          * in the CAM spec.
  409          */
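              /*
               * A hedged userland sketch of exercising this ioctl through
               * /dev/xpt0 (illustrative only: error handling is omitted,
               * the path_id is an assumption, and the usual <fcntl.h>,
               * <sys/ioctl.h>, <cam/cam.h> and <cam/cam_ccb.h> headers
               * are assumed):
               *
               *      union ccb ccb;
               *      int fd = open("/dev/xpt0", O_RDWR);
               *
               *      bzero(&ccb, sizeof(ccb));
               *      ccb.ccb_h.func_code = XPT_PATH_INQ;
               *      ccb.ccb_h.path_id = 0;
               *      ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
               *      ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
               *      if (ioctl(fd, CAMIOCOMMAND, &ccb) == 0 &&
               *          (ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
               *              printf("SIM: %s\n", ccb.cpi.dev_name);
               */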
  410         case CAMIOCOMMAND: {
  411                 union ccb *ccb;
  412                 union ccb *inccb;
  413                 struct cam_eb *bus;
  414 
  415                 inccb = (union ccb *)addr;
  416 
  417                 if (inccb->ccb_h.flags & CAM_UNLOCKED)
  418                         return (EINVAL);
  419 
  420                 bus = xpt_find_bus(inccb->ccb_h.path_id);
  421                 if (bus == NULL)
  422                         return (EINVAL);
  423 
  424                 switch (inccb->ccb_h.func_code) {
  425                 case XPT_SCAN_BUS:
  426                 case XPT_RESET_BUS:
  427                         if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
  428                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
  429                                 xpt_release_bus(bus);
  430                                 return (EINVAL);
  431                         }
  432                         break;
  433                 case XPT_SCAN_TGT:
  434                         if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
  435                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
  436                                 xpt_release_bus(bus);
  437                                 return (EINVAL);
  438                         }
  439                         break;
  440                 default:
  441                         break;
  442                 }
  443 
  444                 switch(inccb->ccb_h.func_code) {
  445                 case XPT_SCAN_BUS:
  446                 case XPT_RESET_BUS:
  447                 case XPT_PATH_INQ:
  448                 case XPT_ENG_INQ:
  449                 case XPT_SCAN_LUN:
  450                 case XPT_SCAN_TGT:
  451 
  452                         ccb = xpt_alloc_ccb();
  453 
  454                         /*
  455                          * Create a path using the bus, target, and lun the
  456                          * user passed in.
  457                          */
  458                         if (xpt_create_path(&ccb->ccb_h.path, NULL,
  459                                             inccb->ccb_h.path_id,
  460                                             inccb->ccb_h.target_id,
  461                                             inccb->ccb_h.target_lun) !=
  462                                             CAM_REQ_CMP){
  463                                 error = EINVAL;
  464                                 xpt_free_ccb(ccb);
  465                                 break;
  466                         }
  467                         /* Ensure all of our fields are correct */
  468                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
  469                                       inccb->ccb_h.pinfo.priority);
  470                         xpt_merge_ccb(ccb, inccb);
  471                         xpt_path_lock(ccb->ccb_h.path);
  472                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
  473                         xpt_path_unlock(ccb->ccb_h.path);
  474                         bcopy(ccb, inccb, sizeof(union ccb));
  475                         xpt_free_path(ccb->ccb_h.path);
  476                         xpt_free_ccb(ccb);
  477                         break;
  478 
  479                 case XPT_DEBUG: {
  480                         union ccb ccb;
  481 
  482                         /*
  483                          * This is an immediate CCB, so it's okay to
  484                          * allocate it on the stack.
  485                          */
  486 
  487                         /*
  488                          * Create a path using the bus, target, and lun the
  489                          * user passed in.
  490                          */
  491                         if (xpt_create_path(&ccb.ccb_h.path, NULL,
  492                                             inccb->ccb_h.path_id,
  493                                             inccb->ccb_h.target_id,
  494                                             inccb->ccb_h.target_lun) !=
  495                                             CAM_REQ_CMP){
  496                                 error = EINVAL;
  497                                 break;
  498                         }
  499                         /* Ensure all of our fields are correct */
  500                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
  501                                       inccb->ccb_h.pinfo.priority);
  502                         xpt_merge_ccb(&ccb, inccb);
  503                         xpt_action(&ccb);
  504                         bcopy(&ccb, inccb, sizeof(union ccb));
  505                         xpt_free_path(ccb.ccb_h.path);
  506                         break;
  507 
  508                 }
  509                 case XPT_DEV_MATCH: {
  510                         struct cam_periph_map_info mapinfo;
  511                         struct cam_path *old_path;
  512 
  513                         /*
  514                          * We can't deal with physical addresses for this
  515                          * type of transaction.
  516                          */
  517                         if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
  518                             CAM_DATA_VADDR) {
  519                                 error = EINVAL;
  520                                 break;
  521                         }
  522 
  523                         /*
  524                          * Save this in case the caller had it set to
  525                          * something in particular.
  526                          */
  527                         old_path = inccb->ccb_h.path;
  528 
  529                         /*
  530                          * We really don't need a path for the matching
  531                          * code.  The path is needed because of the
  532                          * debugging statements in xpt_action().  They
  533                          * assume that the CCB has a valid path.
  534                          */
  535                         inccb->ccb_h.path = xpt_periph->path;
  536 
  537                         bzero(&mapinfo, sizeof(mapinfo));
  538 
  539                         /*
  540                          * Map the pattern and match buffers into kernel
  541                          * virtual address space.
  542                          */
  543                         error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS);
  544 
  545                         if (error) {
  546                                 inccb->ccb_h.path = old_path;
  547                                 break;
  548                         }
  549 
  550                         /*
  551                          * This is an immediate CCB, we can send it on directly.
  552                          */
  553                         xpt_action(inccb);
  554 
  555                         /*
  556                          * Map the buffers back into user space.
  557                          */
  558                         cam_periph_unmapmem(inccb, &mapinfo);
  559 
  560                         inccb->ccb_h.path = old_path;
  561 
  562                         error = 0;
  563                         break;
  564                 }
  565                 default:
  566                         error = ENOTSUP;
  567                         break;
  568                 }
  569                 xpt_release_bus(bus);
  570                 break;
  571         }
  572         /*
  573          * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
  574          * with the peripheral driver name and unit number filled in.  The other
  575          * fields don't really matter as input.  The passthrough driver name
  576          * ("pass") and unit number are passed back in the ccb.  The current
  577          * device generation number, the index into the device peripheral
  578          * driver list, and the status are also passed back.  Note that
  579          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
  580          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
  581          * (or rather should be) impossible for the device peripheral driver
  582          * list to change since we look at the whole thing in one pass, and
  583          * we do it with lock protection.
  584          *
  585          */
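              /*
               * A hedged userland sketch (roughly what libcam's
               * cam_open_device() does internally; illustrative only,
               * error handling omitted):
               *
               *      union ccb ccb;
               *      int fd = open("/dev/xpt0", O_RDWR);
               *
               *      bzero(&ccb, sizeof(ccb));
               *      ccb.ccb_h.func_code = XPT_GDEVLIST;
               *      strlcpy(ccb.cgdl.periph_name, "da",
               *          sizeof(ccb.cgdl.periph_name));
               *      ccb.cgdl.unit_number = 0;
               *      if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
               *          (ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
               *              printf("pass dev: %s%d\n", ccb.cgdl.periph_name,
               *                  ccb.cgdl.unit_number);
               */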
  586         case CAMGETPASSTHRU: {
  587                 union ccb *ccb;
  588                 struct cam_periph *periph;
  589                 struct periph_driver **p_drv;
  590                 char   *name;
  591                 u_int unit;
  592                 int base_periph_found;
  593 
  594                 ccb = (union ccb *)addr;
  595                 unit = ccb->cgdl.unit_number;
  596                 name = ccb->cgdl.periph_name;
  597                 base_periph_found = 0;
  598 
  599                 /*
  600                  * Sanity check -- make sure we don't get a null peripheral
  601                  * driver name.
  602                  */
  603                 if (*ccb->cgdl.periph_name == '\0') {
  604                         error = EINVAL;
  605                         break;
  606                 }
  607 
  608                 /* Keep the list from changing while we traverse it */
  609                 xpt_lock_buses();
  610 
  611                 /* first find our driver in the list of drivers */
  612                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
  613                         if (strcmp((*p_drv)->driver_name, name) == 0)
  614                                 break;
  615 
  616                 if (*p_drv == NULL) {
  617                         xpt_unlock_buses();
  618                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  619                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  620                         *ccb->cgdl.periph_name = '\0';
  621                         ccb->cgdl.unit_number = 0;
  622                         error = ENOENT;
  623                         break;
  624                 }
  625 
  626                 /*
  627                  * Run through every peripheral instance of this driver
  628                  * and check to see whether it matches the unit passed
  629                  * in by the user.  If it does, get out of the loops and
  630                  * find the passthrough driver associated with that
  631                  * peripheral driver.
  632                  */
  633                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
  634                      periph = TAILQ_NEXT(periph, unit_links)) {
  635 
  636                         if (periph->unit_number == unit)
  637                                 break;
  638                 }
  639                 /*
  640                  * If we found the peripheral driver that the user passed
  641                  * in, go through all of the peripheral drivers for that
  642                  * particular device and look for a passthrough driver.
  643                  */
  644                 if (periph != NULL) {
  645                         struct cam_ed *device;
  646                         int i;
  647 
  648                         base_periph_found = 1;
  649                         device = periph->path->device;
  650                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
  651                              periph != NULL;
  652                              periph = SLIST_NEXT(periph, periph_links), i++) {
  653                                 /*
  654                                  * Check to see whether we have a
  655                                  * passthrough device or not.
  656                                  */
  657                                 if (strcmp(periph->periph_name, "pass") == 0) {
  658                                         /*
  659                                          * Fill in the getdevlist fields.
  660                                          */
  661                                         strlcpy(ccb->cgdl.periph_name,
  662                                                periph->periph_name,
  663                                                sizeof(ccb->cgdl.periph_name));
  664                                         ccb->cgdl.unit_number =
  665                                                 periph->unit_number;
  666                                         if (SLIST_NEXT(periph, periph_links))
  667                                                 ccb->cgdl.status =
  668                                                         CAM_GDEVLIST_MORE_DEVS;
  669                                         else
  670                                                 ccb->cgdl.status =
  671                                                        CAM_GDEVLIST_LAST_DEVICE;
  672                                         ccb->cgdl.generation =
  673                                                 device->generation;
  674                                         ccb->cgdl.index = i;
  675                                         /*
  676                                          * Fill in some CCB header fields
  677                                          * that the user may want.
  678                                          */
  679                                         ccb->ccb_h.path_id =
  680                                                 periph->path->bus->path_id;
  681                                         ccb->ccb_h.target_id =
  682                                                 periph->path->target->target_id;
  683                                         ccb->ccb_h.target_lun =
  684                                                 periph->path->device->lun_id;
  685                                         ccb->ccb_h.status = CAM_REQ_CMP;
  686                                         break;
  687                                 }
  688                         }
  689                 }
  690 
  691                 /*
  692                  * If the periph is null here, one of two things has
  693                  * happened.  The first possibility is that we couldn't
  694                  * find the unit number of the particular peripheral driver
  695                  * that the user is asking about.  e.g. the user asks for
  696                  * the passthrough driver for "da11".  We find the list of
  697                  * "da" peripherals all right, but there is no unit 11.
  698                  * The other possibility is that we went through the list
  699                  * of peripheral drivers attached to the device structure,
  700                  * but didn't find one with the name "pass".  Either way,
  701                  * we return ENOENT, since we couldn't find something.
  702                  */
  703                 if (periph == NULL) {
  704                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  705                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
  706                         *ccb->cgdl.periph_name = '\0';
  707                         ccb->cgdl.unit_number = 0;
  708                         error = ENOENT;
  709                         /*
  710                          * It is unfortunate that this is even necessary,
  711                          * but there are many, many clueless users out there.
  712                          * If this is true, the user is looking for the
  713                          * passthrough driver, but doesn't have one in his
  714                          * kernel.
  715                          */
  716                         if (base_periph_found == 1) {
  717                                 printf("xptioctl: pass driver is not in the "
  718                                        "kernel\n");
  719                                 printf("xptioctl: put \"device pass\" in "
  720                                        "your kernel config file\n");
  721                         }
  722                 }
  723                 xpt_unlock_buses();
  724                 break;
  725                 }
  726         default:
  727                 error = ENOTTY;
  728                 break;
  729         }
  730 
  731         return(error);
  732 }
  733 
  734 static int
  735 cam_module_event_handler(module_t mod, int what, void *arg)
  736 {
  737         int error;
  738 
  739         switch (what) {
  740         case MOD_LOAD:
  741                 if ((error = xpt_init(NULL)) != 0)
  742                         return (error);
  743                 break;
  744         case MOD_UNLOAD:
  745                 return EBUSY;
  746         default:
  747                 return EOPNOTSUPP;
  748         }
  749 
  750         return 0;
  751 }
  752 
  753 static struct xpt_proto *
  754 xpt_proto_find(cam_proto proto)
  755 {
  756         struct xpt_proto **pp;
  757 
  758         SET_FOREACH(pp, cam_xpt_proto_set) {
  759                 if ((*pp)->proto == proto)
  760                         return *pp;
  761         }
  762 
  763         return NULL;
  764 }
  765 
  766 static void
  767 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
  768 {
  769 
  770         if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
  771                 xpt_free_path(done_ccb->ccb_h.path);
  772                 xpt_free_ccb(done_ccb);
  773         } else {
  774                 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
  775                 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
  776         }
  777         xpt_release_boot();
  778 }
  779 
  780 /* thread to handle bus rescans */
  781 static void
  782 xpt_scanner_thread(void *dummy)
  783 {
  784         union ccb       *ccb;
  785         struct cam_path  path;
  786 
  787         xpt_lock_buses();
  788         for (;;) {
  789                 if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
  790                         msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
  791                                "-", 0);
  792                 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
  793                         TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  794                         xpt_unlock_buses();
  795 
  796                         /*
  797                          * Since the lock can be dropped inside xpt_action()
  798                          * and the path freed by the completion callback even
  799                          * before it returns, take our own copy of the path.
  800                          */
  801                         xpt_copy_path(&path, ccb->ccb_h.path);
  802                         xpt_path_lock(&path);
  803                         xpt_action(ccb);
  804                         xpt_path_unlock(&path);
  805                         xpt_release_path(&path);
  806 
  807                         xpt_lock_buses();
  808                 }
  809         }
  810 }
  811 
  812 void
  813 xpt_rescan(union ccb *ccb)
  814 {
  815         struct ccb_hdr *hdr;
  816 
  817         /* Prepare request */
  818         if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
  819             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
  820                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
  821         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
  822             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
  823                 ccb->ccb_h.func_code = XPT_SCAN_TGT;
  824         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
  825             ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
  826                 ccb->ccb_h.func_code = XPT_SCAN_LUN;
  827         else {
  828                 xpt_print(ccb->ccb_h.path, "illegal scan path\n");
  829                 xpt_free_path(ccb->ccb_h.path);
  830                 xpt_free_ccb(ccb);
  831                 return;
  832         }
  833         CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
  834             ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
  835                 xpt_action_name(ccb->ccb_h.func_code)));
  836 
  837         ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
  838         ccb->ccb_h.cbfcnp = xpt_rescan_done;
  839         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
  840         /* Don't make duplicate entries for the same paths. */
  841         xpt_lock_buses();
  842         if (ccb->ccb_h.ppriv_ptr1 == NULL) {
  843                 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
  844                         if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
  845                                 wakeup(&xsoftc.ccb_scanq);
  846                                 xpt_unlock_buses();
  847                                 xpt_print(ccb->ccb_h.path, "rescan already queued\n");
  848                                 xpt_free_path(ccb->ccb_h.path);
  849                                 xpt_free_ccb(ccb);
  850                                 return;
  851                         }
  852                 }
  853         }
  854         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
  855         xsoftc.buses_to_config++;
  856         wakeup(&xsoftc.ccb_scanq);
  857         xpt_unlock_buses();
  858 }
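      /*
       * A hedged sketch of the usual caller-side idiom in SIM drivers
       * ("sim" is an assumed variable; on success xpt_rescan() consumes
       * both the ccb and its path):
       *
       *      union ccb *ccb = xpt_alloc_ccb_nowait();
       *
       *      if (ccb != NULL) {
       *              if (xpt_create_path(&ccb->ccb_h.path, NULL,
       *                  cam_sim_path(sim), CAM_TARGET_WILDCARD,
       *                  CAM_LUN_WILDCARD) == CAM_REQ_CMP)
       *                      xpt_rescan(ccb);
       *              else
       *                      xpt_free_ccb(ccb);
       *      }
       */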
  859 
  860 /* Functions accessed by the peripheral drivers */
  861 static int
  862 xpt_init(void *dummy)
  863 {
  864         struct cam_sim *xpt_sim;
  865         struct cam_path *path;
  866         struct cam_devq *devq;
  867         cam_status status;
  868         int error, i;
  869 
  870         TAILQ_INIT(&xsoftc.xpt_busses);
  871         TAILQ_INIT(&xsoftc.ccb_scanq);
  872         STAILQ_INIT(&xsoftc.highpowerq);
  873         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
  874 
  875         mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
  876         mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
  877         xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
  878             taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);
  879 
  880 #ifdef CAM_BOOT_DELAY
  881         /*
  882          * Override this value at compile time to assist our users
  883          * who don't use the loader to boot a kernel.
  884          */
  885         xsoftc.boot_delay = CAM_BOOT_DELAY;
  886 #endif
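              /*
               * A hedged kernel-config sketch for the override above
               * (the value is illustrative):
               *
               *      options CAM_BOOT_DELAY=10000
               */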
  887         /*
  888          * The xpt layer is, itself, the equivalent of a SIM.
  889          * Allow 16 ccbs in the ccb pool for it.  This should
  890          * give decent parallelism when we probe busses and
  891          * perform other XPT functions.
  892          */
  893         devq = cam_simq_alloc(16);
  894         xpt_sim = cam_sim_alloc(xptaction,
  895                                 xptpoll,
  896                                 "xpt",
  897                                 /*softc*/NULL,
  898                                 /*unit*/0,
  899                                 /*mtx*/&xsoftc.xpt_lock,
  900                                 /*max_dev_transactions*/0,
  901                                 /*max_tagged_dev_transactions*/0,
  902                                 devq);
  903         if (xpt_sim == NULL)
  904                 return (ENOMEM);
  905 
  906         mtx_lock(&xsoftc.xpt_lock);
  907         if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
  908                 mtx_unlock(&xsoftc.xpt_lock);
  909                 printf("xpt_init: xpt_bus_register failed with status %#x,"
  910                        " failing attach\n", status);
  911                 return (EINVAL);
  912         }
  913         mtx_unlock(&xsoftc.xpt_lock);
  914 
  915         /*
  916          * Looking at the XPT from the SIM layer, the XPT is
  917          * the equivalent of a peripheral driver.  Allocate
  918          * a peripheral driver entry for us.
  919          */
  920         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
  921                                       CAM_TARGET_WILDCARD,
  922                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
  923                 printf("xpt_init: xpt_create_path failed with status %#x,"
  924                        " failing attach\n", status);
  925                 return (EINVAL);
  926         }
  927         xpt_path_lock(path);
  928         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
  929                          path, NULL, 0, xpt_sim);
  930         xpt_path_unlock(path);
  931         xpt_free_path(path);
  932 
  933         if (cam_num_doneqs < 1)
  934                 cam_num_doneqs = 1 + mp_ncpus / 6;
  935         else if (cam_num_doneqs > MAXCPU)
  936                 cam_num_doneqs = MAXCPU;
  937         for (i = 0; i < cam_num_doneqs; i++) {
  938                 mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
  939                     MTX_DEF);
  940                 STAILQ_INIT(&cam_doneqs[i].cam_doneq);
  941                 error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
  942                     &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
  943                 if (error != 0) {
  944                         cam_num_doneqs = i;
  945                         break;
  946                 }
  947         }
  948         if (cam_num_doneqs < 1) {
  949                 printf("xpt_init: Cannot init completion queues "
  950                        "- failing attach\n");
  951                 return (ENOMEM);
  952         }
  953         /*
  954          * Register a callback for when interrupts are enabled.
  955          */
  956         xsoftc.xpt_config_hook =
  957             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
  958                                               M_CAMXPT, M_NOWAIT | M_ZERO);
  959         if (xsoftc.xpt_config_hook == NULL) {
  960                 printf("xpt_init: Cannot malloc config hook "
  961                        "- failing attach\n");
  962                 return (ENOMEM);
  963         }
  964         xsoftc.xpt_config_hook->ich_func = xpt_config;
  965         if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
  966                 free (xsoftc.xpt_config_hook, M_CAMXPT);
  967                 printf("xpt_init: config_intrhook_establish failed "
  968                        "- failing attach\n");
  969         }
  970 
  971         return (0);
  972 }
  973 
  974 static cam_status
  975 xptregister(struct cam_periph *periph, void *arg)
  976 {
  977         struct cam_sim *xpt_sim;
  978 
  979         if (periph == NULL) {
  980                 printf("xptregister: periph was NULL!!\n");
  981                 return(CAM_REQ_CMP_ERR);
  982         }
  983 
  984         xpt_sim = (struct cam_sim *)arg;
  985         xpt_sim->softc = periph;
  986         xpt_periph = periph;
  987         periph->softc = NULL;
  988 
  989         return(CAM_REQ_CMP);
  990 }
  991 
  992 int32_t
  993 xpt_add_periph(struct cam_periph *periph)
  994 {
  995         struct cam_ed *device;
  996         int32_t  status;
  997 
  998         TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
  999         device = periph->path->device;
 1000         status = CAM_REQ_CMP;
 1001         if (device != NULL) {
 1002                 mtx_lock(&device->target->bus->eb_mtx);
 1003                 device->generation++;
 1004                 SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
 1005                 mtx_unlock(&device->target->bus->eb_mtx);
 1006                 atomic_add_32(&xsoftc.xpt_generation, 1);
 1007         }
 1008 
 1009         return (status);
 1010 }
 1011 
 1012 void
 1013 xpt_remove_periph(struct cam_periph *periph)
 1014 {
 1015         struct cam_ed *device;
 1016 
 1017         device = periph->path->device;
 1018         if (device != NULL) {
 1019                 mtx_lock(&device->target->bus->eb_mtx);
 1020                 device->generation++;
 1021                 SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
 1022                 mtx_unlock(&device->target->bus->eb_mtx);
 1023                 atomic_add_32(&xsoftc.xpt_generation, 1);
 1024         }
 1025 }
 1026 
 1027 
 1028 void
 1029 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1030 {
 1031         struct  cam_path *path = periph->path;
 1032         struct  xpt_proto *proto;
 1033 
 1034         cam_periph_assert(periph, MA_OWNED);
 1035         periph->flags |= CAM_PERIPH_ANNOUNCED;
 1036 
 1037         printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
 1038                periph->periph_name, periph->unit_number,
 1039                path->bus->sim->sim_name,
 1040                path->bus->sim->unit_number,
 1041                path->bus->sim->bus_id,
 1042                path->bus->path_id,
 1043                path->target->target_id,
 1044                (uintmax_t)path->device->lun_id);
 1045         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1046         proto = xpt_proto_find(path->device->protocol);
 1047         if (proto)
 1048                 proto->ops->announce(path->device);
 1049         else
 1050                 printf("%s%d: Unknown protocol device %d\n",
 1051                     periph->periph_name, periph->unit_number,
 1052                     path->device->protocol);
 1053         if (path->device->serial_num_len > 0) {
 1054                 /* Don't wrap the screen - print only the first 60 chars */
 1055                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1056                        periph->unit_number, path->device->serial_num);
 1057         }
 1058         /* Announce transport details. */
 1059         path->bus->xport->ops->announce(periph);
 1060         /* Announce command queueing. */
 1061         if (path->device->inq_flags & SID_CmdQue
 1062          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1063                 printf("%s%d: Command Queueing enabled\n",
 1064                        periph->periph_name, periph->unit_number);
 1065         }
 1066         /* Announce caller's details if they were passed in. */
 1067         if (announce_string != NULL)
 1068                 printf("%s%d: %s\n", periph->periph_name,
 1069                        periph->unit_number, announce_string);
 1070 }
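      /*
       * A representative announcement, following the formats above (a
       * hedged sketch; driver names, ids and the serial number are
       * illustrative):
       *
       *      da0 at mpt0 bus 0 scbus0 target 1 lun 0
       *      da0: Serial Number ABC123
       *      da0: Command Queueing enabled
       */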
 1071 
 1072 void
 1073 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
 1074 {
 1075         if (quirks != 0) {
 1076                 printf("%s%d: quirks=0x%b\n", periph->periph_name,
 1077                     periph->unit_number, quirks, bit_string);
 1078         }
 1079 }
 1080 
 1081 void
 1082 xpt_denounce_periph(struct cam_periph *periph)
 1083 {
 1084         struct  cam_path *path = periph->path;
 1085         struct  xpt_proto *proto;
 1086 
 1087         cam_periph_assert(periph, MA_OWNED);
 1088         printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
 1089                periph->periph_name, periph->unit_number,
 1090                path->bus->sim->sim_name,
 1091                path->bus->sim->unit_number,
 1092                path->bus->sim->bus_id,
 1093                path->bus->path_id,
 1094                path->target->target_id,
 1095                (uintmax_t)path->device->lun_id);
 1096         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1097         proto = xpt_proto_find(path->device->protocol);
 1098         if (proto)
 1099                 proto->ops->denounce(path->device);
 1100         else
 1101                 printf("%s%d: Unknown protocol device %d\n",
 1102                     periph->periph_name, periph->unit_number,
 1103                     path->device->protocol);
 1104         if (path->device->serial_num_len > 0)
 1105                 printf(" s/n %.60s", path->device->serial_num);
 1106         printf(" detached\n");
 1107 }
 1108 
 1109 
 1110 int
 1111 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
 1112 {
 1113         int ret = -1, l, o;
 1114         struct ccb_dev_advinfo cdai;
 1115         struct scsi_vpd_id_descriptor *idd;
 1116 
 1117         xpt_path_assert(path, MA_OWNED);
 1118 
 1119         memset(&cdai, 0, sizeof(cdai));
 1120         xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 1121         cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 1122         cdai.flags = CDAI_FLAG_NONE;
 1123         cdai.bufsiz = len;
 1124 
 1125         if (!strcmp(attr, "GEOM::ident"))
 1126                 cdai.buftype = CDAI_TYPE_SERIAL_NUM;
 1127         else if (!strcmp(attr, "GEOM::physpath"))
 1128                 cdai.buftype = CDAI_TYPE_PHYS_PATH;
 1129         else if (strcmp(attr, "GEOM::lunid") == 0 ||
 1130                  strcmp(attr, "GEOM::lunname") == 0) {
 1131                 cdai.buftype = CDAI_TYPE_SCSI_DEVID;
 1132                 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
 1133         } else
 1134                 goto out;
 1135 
 1136         cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
 1137         if (cdai.buf == NULL) {
 1138                 ret = ENOMEM;
 1139                 goto out;
 1140         }
 1141         xpt_action((union ccb *)&cdai); /* can only be synchronous */
 1142         if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
 1143                 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
 1144         if (cdai.provsiz == 0)
 1145                 goto out;
 1146         if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
 1147                 if (strcmp(attr, "GEOM::lunid") == 0) {
 1148                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1149                             cdai.provsiz, scsi_devid_is_lun_naa);
 1150                         if (idd == NULL)
 1151                                 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1152                                     cdai.provsiz, scsi_devid_is_lun_eui64);
 1153                         if (idd == NULL)
 1154                                 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1155                                     cdai.provsiz, scsi_devid_is_lun_uuid);
 1156                         if (idd == NULL)
 1157                                 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1158                                     cdai.provsiz, scsi_devid_is_lun_md5);
 1159                 } else
 1160                         idd = NULL;
 1161                 if (idd == NULL)
 1162                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1163                             cdai.provsiz, scsi_devid_is_lun_t10);
 1164                 if (idd == NULL)
 1165                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
 1166                             cdai.provsiz, scsi_devid_is_lun_name);
 1167                 if (idd == NULL)
 1168                         goto out;
 1169                 ret = 0;
 1170                 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) {
 1171                         if (idd->length < len) {
 1172                                 for (l = 0; l < idd->length; l++)
 1173                                         buf[l] = idd->identifier[l] ?
 1174                                             idd->identifier[l] : ' ';
 1175                                 buf[l] = 0;
 1176                         } else
 1177                                 ret = EFAULT;
 1178                 } else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
 1179                         l = strnlen(idd->identifier, idd->length);
 1180                         if (l < len) {
 1181                                 bcopy(idd->identifier, buf, l);
 1182                                 buf[l] = 0;
 1183                         } else
 1184                                 ret = EFAULT;
 1185                 } else if ((idd->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_UUID
 1186                     && idd->identifier[0] == 0x10) {
 1187                         if ((idd->length - 2) * 2 + 4 < len) {
 1188                                 for (l = 2, o = 0; l < idd->length; l++) {
 1189                                         if (l == 6 || l == 8 || l == 10 || l == 12)
 1190                                             o += sprintf(buf + o, "-");
 1191                                         o += sprintf(buf + o, "%02x",
 1192                                             idd->identifier[l]);
 1193                                 }
 1194                         } else
 1195                                 ret = EFAULT;
 1196                 } else {
 1197                         if (idd->length * 2 < len) {
 1198                                 for (l = 0; l < idd->length; l++)
 1199                                         sprintf(buf + l * 2, "%02x",
 1200                                             idd->identifier[l]);
 1201                         } else
 1202                                 ret = EFAULT;
 1203                 }
 1204         } else {
 1205                 ret = 0;
 1206                 if (strlcpy(buf, cdai.buf, len) >= len)
 1207                         ret = EFAULT;
 1208         }
 1209 
 1210 out:
 1211         if (cdai.buf != NULL)
 1212                 free(cdai.buf, M_CAMXPT);
 1213         return ret;
 1214 }
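      /*
       * A hedged caller sketch (the path must be locked, per the assert
       * in xpt_getattr() above; the buffer size is an arbitrary
       * assumption):
       *
       *      char ident[100];
       *
       *      cam_periph_lock(periph);
       *      if (xpt_getattr(ident, sizeof(ident), "GEOM::ident",
       *          periph->path) == 0)
       *              printf("ident: %s\n", ident);
       *      cam_periph_unlock(periph);
       */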
 1215 
 1216 static dev_match_ret
 1217 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1218             struct cam_eb *bus)
 1219 {
 1220         dev_match_ret retval;
 1221         u_int i;
 1222 
 1223         retval = DM_RET_NONE;
 1224 
 1225         /*
 1226          * If we aren't given something to match against, that's an error.
 1227          */
 1228         if (bus == NULL)
 1229                 return(DM_RET_ERROR);
 1230 
 1231         /*
 1232          * If there are no match entries, then this bus matches no
 1233          * matter what.
 1234          */
 1235         if ((patterns == NULL) || (num_patterns == 0))
 1236                 return(DM_RET_DESCEND | DM_RET_COPY);
 1237 
 1238         for (i = 0; i < num_patterns; i++) {
 1239                 struct bus_match_pattern *cur_pattern;
 1240 
 1241                 /*
 1242                  * If the pattern in question isn't for a bus node, we
 1243                  * aren't interested.  However, we do indicate to the
 1244                  * calling routine that we should continue descending the
 1245                  * tree, since the user wants to match against lower-level
 1246                  * EDT elements.
 1247                  */
 1248                 if (patterns[i].type != DEV_MATCH_BUS) {
 1249                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1250                                 retval |= DM_RET_DESCEND;
 1251                         continue;
 1252                 }
 1253 
 1254                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1255 
 1256                 /*
 1257                  * If they want to match any bus node, we give them any
 1258                  * bus node.
 1259                  */
 1260                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1261                         /* set the copy flag */
 1262                         retval |= DM_RET_COPY;
 1263 
 1264                         /*
 1265                          * If we've already decided on an action, go ahead
 1266                          * and return.
 1267                          */
 1268                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1269                                 return(retval);
 1270                 }
 1271 
 1272                 /*
 1273                  * Not sure why someone would do this...
 1274                  */
 1275                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1276                         continue;
 1277 
 1278                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1279                  && (cur_pattern->path_id != bus->path_id))
 1280                         continue;
 1281 
 1282                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1283                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1284                         continue;
 1285 
 1286                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1287                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1288                         continue;
 1289 
 1290                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1291                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1292                              DEV_IDLEN) != 0))
 1293                         continue;
 1294 
 1295                 /*
 1296                  * If we get to this point, the user definitely wants
 1297                  * information on this bus.  So tell the caller to copy the
 1298                  * data out.
 1299                  */
 1300                 retval |= DM_RET_COPY;
 1301 
 1302                 /*
 1303                  * If the return action has been set to descend, then we
 1304                  * know that we've already seen a non-bus matching
 1305                  * expression, therefore we need to further descend the tree.
 1306                  * This won't change by continuing around the loop, so we
 1307                  * go ahead and return.  If we haven't seen a non-bus
 1308                  * matching expression, we keep going around the loop until
 1309                  * we exhaust the matching expressions.  We'll set the stop
 1310                  * flag once we fall out of the loop.
 1311                  */
 1312                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1313                         return(retval);
 1314         }
 1315 
 1316         /*
 1317          * If the return action hasn't been set to descend yet, that means
 1318          * we haven't seen anything other than bus matching patterns.  So
 1319          * tell the caller to stop descending the tree -- the user doesn't
 1320          * want to match against lower level tree elements.
 1321          */
 1322         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1323                 retval |= DM_RET_STOP;
 1324 
 1325         return(retval);
 1326 }
 1327 
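      /*
       * Match a single device node against any number of match patterns.
       */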
 1328 static dev_match_ret
 1329 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1330                struct cam_ed *device)
 1331 {
 1332         dev_match_ret retval;
 1333         u_int i;
 1334 
 1335         retval = DM_RET_NONE;
 1336 
 1337         /*
 1338          * If we aren't given something to match against, that's an error.
 1339          */
 1340         if (device == NULL)
 1341                 return(DM_RET_ERROR);
 1342 
 1343         /*
 1344          * If there are no match entries, then this device matches no
 1345          * matter what.
 1346          */
 1347         if ((patterns == NULL) || (num_patterns == 0))
 1348                 return(DM_RET_DESCEND | DM_RET_COPY);
 1349 
 1350         for (i = 0; i < num_patterns; i++) {
 1351                 struct device_match_pattern *cur_pattern;
 1352                 struct scsi_vpd_device_id *device_id_page;
 1353 
 1354                 /*
 1355                  * If the pattern in question isn't for a device node, we
 1356                  * aren't interested.
 1357                  */
 1358                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1359                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1360                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1361                                 retval |= DM_RET_DESCEND;
 1362                         continue;
 1363                 }
 1364 
 1365                 cur_pattern = &patterns[i].pattern.device_pattern;
 1366 
 1367                 /* Error out if mutually exclusive options are specified. */ 
 1368                 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
 1369                  == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
 1370                         return(DM_RET_ERROR);
 1371 
 1372                 /*
 1373                  * If they want to match any device node, we give them any
 1374                  * device node.
 1375                  */
 1376                 if (cur_pattern->flags == DEV_MATCH_ANY)
 1377                         goto copy_dev_node;
 1378 
 1379                 /*
 1380                  * Not sure why someone would do this...
 1381                  */
 1382                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1383                         continue;
 1384 
 1385                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1386                  && (cur_pattern->path_id != device->target->bus->path_id))
 1387                         continue;
 1388 
 1389                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1390                  && (cur_pattern->target_id != device->target->target_id))
 1391                         continue;
 1392 
 1393                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1394                  && (cur_pattern->target_lun != device->lun_id))
 1395                         continue;
 1396 
 1397                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1398                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1399                                     (caddr_t)&cur_pattern->data.inq_pat,
 1400                                     1, sizeof(cur_pattern->data.inq_pat),
 1401                                     scsi_static_inquiry_match) == NULL))
 1402                         continue;
 1403 
 1404                 device_id_page = (struct scsi_vpd_device_id *)device->device_id;
 1405                 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
 1406                  && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
 1407                   || scsi_devid_match((uint8_t *)device_id_page->desc_list,
 1408                                       device->device_id_len
 1409                                     - SVPD_DEVICE_ID_HDR_LEN,
 1410                                       cur_pattern->data.devid_pat.id,
 1411                                       cur_pattern->data.devid_pat.id_len) != 0))
 1412                         continue;
 1413 
 1414 copy_dev_node:
 1415                 /*
 1416                  * If we get to this point, the user definitely wants
 1417                  * information on this device.  So tell the caller to copy
 1418                  * the data out.
 1419                  */
 1420                 retval |= DM_RET_COPY;
 1421 
 1422                 /*
 1423                  * If the return action has been set to descend, then we
 1424                  * know that we've already seen a peripheral matching
 1425                  * expression, therefore we need to further descend the tree.
 1426                  * This won't change by continuing around the loop, so we
 1427                  * go ahead and return.  If we haven't seen a peripheral
 1428                  * matching expression, we keep going around the loop until
 1429                  * we exhaust the matching expressions.  We'll set the stop
 1430                  * flag once we fall out of the loop.
 1431                  */
 1432                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1433                         return(retval);
 1434         }
 1435 
 1436         /*
 1437          * If the return action hasn't been set to descend yet, that means
 1438          * we haven't seen any peripheral matching patterns.  So tell the
 1439          * caller to stop descending the tree -- the user doesn't want to
 1440          * match against lower level tree elements.
 1441          */
 1442         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1443                 retval |= DM_RET_STOP;
 1444 
 1445         return(retval);
 1446 }
 1447 
 1448 /*
 1449  * Match a single peripheral against any number of match patterns.
 1450  */
 1451 static dev_match_ret
 1452 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1453                struct cam_periph *periph)
 1454 {
 1455         dev_match_ret retval;
 1456         u_int i;
 1457 
 1458         /*
 1459          * If we aren't given something to match against, that's an error.
 1460          */
 1461         if (periph == NULL)
 1462                 return(DM_RET_ERROR);
 1463 
 1464         /*
 1465          * If there are no match entries, then this peripheral matches no
 1466          * matter what.
 1467          */
 1468         if ((patterns == NULL) || (num_patterns == 0))
 1469                 return(DM_RET_STOP | DM_RET_COPY);
 1470 
 1471         /*
 1472          * There aren't any nodes below a peripheral node, so there's no
 1473          * reason to descend the tree any further.
 1474          */
 1475         retval = DM_RET_STOP;
 1476 
 1477         for (i = 0; i < num_patterns; i++) {
 1478                 struct periph_match_pattern *cur_pattern;
 1479 
 1480                 /*
 1481                  * If the pattern in question isn't for a peripheral, we
 1482                  * aren't interested.
 1483                  */
 1484                 if (patterns[i].type != DEV_MATCH_PERIPH)
 1485                         continue;
 1486 
 1487                 cur_pattern = &patterns[i].pattern.periph_pattern;
 1488 
 1489                 /*
 1490                  * If they want to match on anything, then we will do so.
 1491                  */
 1492                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 1493                         /* set the copy flag */
 1494                         retval |= DM_RET_COPY;
 1495 
 1496                         /*
 1497                          * We've already set the return action to stop,
 1498                          * since there are no nodes below peripherals in
 1499                          * the tree.
 1500                          */
 1501                         return(retval);
 1502                 }
 1503 
 1504                 /*
 1505                  * Not sure why someone would do this...
 1506                  */
 1507                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 1508                         continue;
 1509 
 1510                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 1511                  && (cur_pattern->path_id != periph->path->bus->path_id))
 1512                         continue;
 1513 
 1514                 /*
 1515                  * For the target and lun IDs, we have to make sure the
 1516                  * target and lun pointers aren't NULL.  The xpt peripheral
 1517                  * has a wildcard target and device.
 1518                  */
 1519                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 1520                  && ((periph->path->target == NULL)
 1521                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 1522                         continue;
 1523 
 1524                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 1525                  && ((periph->path->device == NULL)
 1526                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 1527                         continue;
 1528 
 1529                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 1530                  && (cur_pattern->unit_number != periph->unit_number))
 1531                         continue;
 1532 
 1533                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 1534                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 1535                              DEV_IDLEN) != 0))
 1536                         continue;
 1537 
 1538                 /*
 1539                  * If we get to this point, the user definitely wants
 1540                  * information on this peripheral.  So tell the caller to
 1541                  * copy the data out.
 1542                  */
 1543                 retval |= DM_RET_COPY;
 1544 
 1545                 /*
 1546                  * The return action has already been set to stop, since
 1547                  * peripherals don't have any nodes below them in the EDT.
 1548                  */
 1549                 return(retval);
 1550         }
 1551 
 1552         /*
 1553          * If we get to this point, the peripheral that was passed in
 1554          * doesn't match any of the patterns.
 1555          */
 1556         return(retval);
 1557 }
 1558 
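      /*
       * The xptedt*func() callbacks below implement a single XPT_DEV_MATCH
       * pass over the EDT.  Each level checks the saved traversal position
       * and its generation count, so that an interrupted traversal can be
       * resumed (or reported as stale) on the next call, and then recurses
       * into the level below it.
       */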
 1559 static int
 1560 xptedtbusfunc(struct cam_eb *bus, void *arg)
 1561 {
 1562         struct ccb_dev_match *cdm;
 1563         struct cam_et *target;
 1564         dev_match_ret retval;
 1565 
 1566         cdm = (struct ccb_dev_match *)arg;
 1567 
 1568         /*
 1569          * If our position is for something deeper in the tree, that means
 1570          * that we've already seen this node.  So, we keep going down.
 1571          */
 1572         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1573          && (cdm->pos.cookie.bus == bus)
 1574          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1575          && (cdm->pos.cookie.target != NULL))
 1576                 retval = DM_RET_DESCEND;
 1577         else
 1578                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 1579 
 1580         /*
 1581          * If we got an error, bail out of the search.
 1582          */
 1583         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1584                 cdm->status = CAM_DEV_MATCH_ERROR;
 1585                 return(0);
 1586         }
 1587 
 1588         /*
 1589          * If the copy flag is set, copy this bus out.
 1590          */
 1591         if (retval & DM_RET_COPY) {
 1592                 int spaceleft, j;
 1593 
 1594                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1595                         sizeof(struct dev_match_result));
 1596 
 1597                 /*
 1598                  * If we don't have enough space to put in another
 1599                  * match result, save our position and tell the
 1600                  * user there are more devices to check.
 1601                  */
 1602                 if (spaceleft < sizeof(struct dev_match_result)) {
 1603                         bzero(&cdm->pos, sizeof(cdm->pos));
 1604                         cdm->pos.position_type =
 1605                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 1606 
 1607                         cdm->pos.cookie.bus = bus;
 1608                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1609                                 xsoftc.bus_generation;
 1610                         cdm->status = CAM_DEV_MATCH_MORE;
 1611                         return(0);
 1612                 }
 1613                 j = cdm->num_matches;
 1614                 cdm->num_matches++;
 1615                 cdm->matches[j].type = DEV_MATCH_BUS;
 1616                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 1617                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 1618                 cdm->matches[j].result.bus_result.unit_number =
 1619                         bus->sim->unit_number;
 1620                 strlcpy(cdm->matches[j].result.bus_result.dev_name,
 1621                         bus->sim->sim_name,
 1622                         sizeof(cdm->matches[j].result.bus_result.dev_name));
 1623         }
 1624 
 1625         /*
 1626          * If the user is only interested in busses, there's no
 1627          * reason to descend to the next level in the tree.
 1628          */
 1629         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1630                 return(1);
 1631 
 1632         /*
 1633          * If there is a target generation recorded, check it to
 1634          * make sure the target list hasn't changed.
 1635          */
 1636         mtx_lock(&bus->eb_mtx);
 1637         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1638          && (cdm->pos.cookie.bus == bus)
 1639          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1640          && (cdm->pos.cookie.target != NULL)) {
 1641                 if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
 1642                     bus->generation)) {
 1643                         mtx_unlock(&bus->eb_mtx);
 1644                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1645                         return (0);
 1646                 }
 1647                 target = (struct cam_et *)cdm->pos.cookie.target;
 1648                 target->refcount++;
 1649         } else
 1650                 target = NULL;
 1651         mtx_unlock(&bus->eb_mtx);
 1652 
 1653         return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
 1654 }
 1655 
 1656 static int
 1657 xptedttargetfunc(struct cam_et *target, void *arg)
 1658 {
 1659         struct ccb_dev_match *cdm;
 1660         struct cam_eb *bus;
 1661         struct cam_ed *device;
 1662 
 1663         cdm = (struct ccb_dev_match *)arg;
 1664         bus = target->bus;
 1665 
 1666         /*
 1667          * If there is a device list generation recorded, check it to
 1668          * make sure the device list hasn't changed.
 1669          */
 1670         mtx_lock(&bus->eb_mtx);
 1671         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1672          && (cdm->pos.cookie.bus == bus)
 1673          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1674          && (cdm->pos.cookie.target == target)
 1675          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1676          && (cdm->pos.cookie.device != NULL)) {
 1677                 if (cdm->pos.generations[CAM_DEV_GENERATION] !=
 1678                     target->generation) {
 1679                         mtx_unlock(&bus->eb_mtx);
 1680                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1681                         return(0);
 1682                 }
 1683                 device = (struct cam_ed *)cdm->pos.cookie.device;
 1684                 device->refcount++;
 1685         } else
 1686                 device = NULL;
 1687         mtx_unlock(&bus->eb_mtx);
 1688 
 1689         return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
 1690 }
 1691 
 1692 static int
 1693 xptedtdevicefunc(struct cam_ed *device, void *arg)
 1694 {
 1695         struct cam_eb *bus;
 1696         struct cam_periph *periph;
 1697         struct ccb_dev_match *cdm;
 1698         dev_match_ret retval;
 1699 
 1700         cdm = (struct ccb_dev_match *)arg;
 1701         bus = device->target->bus;
 1702 
 1703         /*
 1704          * If our position is for something deeper in the tree, that means
 1705          * that we've already seen this node.  So, we keep going down.
 1706          */
 1707         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1708          && (cdm->pos.cookie.device == device)
 1709          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1710          && (cdm->pos.cookie.periph != NULL))
 1711                 retval = DM_RET_DESCEND;
 1712         else
 1713                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 1714                                         device);
 1715 
 1716         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1717                 cdm->status = CAM_DEV_MATCH_ERROR;
 1718                 return(0);
 1719         }
 1720 
 1721         /*
 1722          * If the copy flag is set, copy this device out.
 1723          */
 1724         if (retval & DM_RET_COPY) {
 1725                 int spaceleft, j;
 1726 
 1727                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1728                         sizeof(struct dev_match_result));
 1729 
 1730                 /*
 1731                  * If we don't have enough space to put in another
 1732                  * match result, save our position and tell the
 1733                  * user there are more devices to check.
 1734                  */
 1735                 if (spaceleft < sizeof(struct dev_match_result)) {
 1736                         bzero(&cdm->pos, sizeof(cdm->pos));
 1737                         cdm->pos.position_type =
 1738                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1739                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 1740 
 1741                         cdm->pos.cookie.bus = device->target->bus;
 1742                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1743                                 xsoftc.bus_generation;
 1744                         cdm->pos.cookie.target = device->target;
 1745                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1746                                 device->target->bus->generation;
 1747                         cdm->pos.cookie.device = device;
 1748                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1749                                 device->target->generation;
 1750                         cdm->status = CAM_DEV_MATCH_MORE;
 1751                         return(0);
 1752                 }
 1753                 j = cdm->num_matches;
 1754                 cdm->num_matches++;
 1755                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 1756                 cdm->matches[j].result.device_result.path_id =
 1757                         device->target->bus->path_id;
 1758                 cdm->matches[j].result.device_result.target_id =
 1759                         device->target->target_id;
 1760                 cdm->matches[j].result.device_result.target_lun =
 1761                         device->lun_id;
 1762                 cdm->matches[j].result.device_result.protocol =
 1763                         device->protocol;
 1764                 bcopy(&device->inq_data,
 1765                       &cdm->matches[j].result.device_result.inq_data,
 1766                       sizeof(struct scsi_inquiry_data));
 1767                 bcopy(&device->ident_data,
 1768                       &cdm->matches[j].result.device_result.ident_data,
 1769                       sizeof(struct ata_params));
 1770 
 1771                 /* Let the user know whether this device is unconfigured */
 1772                 if (device->flags & CAM_DEV_UNCONFIGURED)
 1773                         cdm->matches[j].result.device_result.flags =
 1774                                 DEV_RESULT_UNCONFIGURED;
 1775                 else
 1776                         cdm->matches[j].result.device_result.flags =
 1777                                 DEV_RESULT_NOFLAG;
 1778         }
 1779 
 1780         /*
 1781          * If the user isn't interested in peripherals, don't descend
 1782          * the tree any further.
 1783          */
 1784         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1785                 return(1);
 1786 
 1787         /*
 1788          * If there is a peripheral list generation recorded, make sure
 1789          * it hasn't changed.
 1790          */
 1791         xpt_lock_buses();
 1792         mtx_lock(&bus->eb_mtx);
 1793         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1794          && (cdm->pos.cookie.bus == bus)
 1795          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1796          && (cdm->pos.cookie.target == device->target)
 1797          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 1798          && (cdm->pos.cookie.device == device)
 1799          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1800          && (cdm->pos.cookie.periph != NULL)) {
 1801                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1802                     device->generation) {
 1803                         mtx_unlock(&bus->eb_mtx);
 1804                         xpt_unlock_buses();
 1805                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1806                         return(0);
 1807                 }
 1808                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
 1809                 periph->refcount++;
 1810         } else
 1811                 periph = NULL;
 1812         mtx_unlock(&bus->eb_mtx);
 1813         xpt_unlock_buses();
 1814 
 1815         return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
 1816 }
 1817 
 1818 static int
 1819 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 1820 {
 1821         struct ccb_dev_match *cdm;
 1822         dev_match_ret retval;
 1823 
 1824         cdm = (struct ccb_dev_match *)arg;
 1825 
 1826         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1827 
 1828         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1829                 cdm->status = CAM_DEV_MATCH_ERROR;
 1830                 return(0);
 1831         }
 1832 
 1833         /*
 1834          * If the copy flag is set, copy this peripheral out.
 1835          */
 1836         if (retval & DM_RET_COPY) {
 1837                 int spaceleft, j;
 1838                 size_t l;
 1839 
 1840                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1841                         sizeof(struct dev_match_result));
 1842 
 1843                 /*
 1844                  * If we don't have enough space to put in another
 1845                  * match result, save our position and tell the
 1846                  * user there are more devices to check.
 1847                  */
 1848                 if (spaceleft < sizeof(struct dev_match_result)) {
 1849                         bzero(&cdm->pos, sizeof(cdm->pos));
 1850                         cdm->pos.position_type =
 1851                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 1852                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 1853                                 CAM_DEV_POS_PERIPH;
 1854 
 1855                         cdm->pos.cookie.bus = periph->path->bus;
 1856                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1857                                 xsoftc.bus_generation;
 1858                         cdm->pos.cookie.target = periph->path->target;
 1859                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 1860                                 periph->path->bus->generation;
 1861                         cdm->pos.cookie.device = periph->path->device;
 1862                         cdm->pos.generations[CAM_DEV_GENERATION] =
 1863                                 periph->path->target->generation;
 1864                         cdm->pos.cookie.periph = periph;
 1865                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 1866                                 periph->path->device->generation;
 1867                         cdm->status = CAM_DEV_MATCH_MORE;
 1868                         return(0);
 1869                 }
 1870 
 1871                 j = cdm->num_matches;
 1872                 cdm->num_matches++;
 1873                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 1874                 cdm->matches[j].result.periph_result.path_id =
 1875                         periph->path->bus->path_id;
 1876                 cdm->matches[j].result.periph_result.target_id =
 1877                         periph->path->target->target_id;
 1878                 cdm->matches[j].result.periph_result.target_lun =
 1879                         periph->path->device->lun_id;
 1880                 cdm->matches[j].result.periph_result.unit_number =
 1881                         periph->unit_number;
 1882                 l = sizeof(cdm->matches[j].result.periph_result.periph_name);
 1883                 strlcpy(cdm->matches[j].result.periph_result.periph_name,
 1884                         periph->periph_name, l);
 1885         }
 1886 
 1887         return(1);
 1888 }
 1889 
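      /*
       * Perform an EDT traversal for a single XPT_DEV_MATCH CCB.
       *
       * As an illustrative sketch (not part of this file), a userland
       * consumer such as camcontrol(8) drives this machinery by issuing
       * the same CCB through the xpt device until the whole tree has
       * been seen, roughly:
       *
       *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
       *	ccb.cdm.match_buf_len = sizeof(matches);
       *	ccb.cdm.matches = matches;
       *	ccb.cdm.num_patterns = 0;    (an empty list matches all)
       *	do {
       *		ioctl(fd, CAMIOCOMMAND, &ccb);
       *		(consume ccb.cdm.num_matches results)
       *	} while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
       */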
 1890 static int
 1891 xptedtmatch(struct ccb_dev_match *cdm)
 1892 {
 1893         struct cam_eb *bus;
 1894         int ret;
 1895 
 1896         cdm->num_matches = 0;
 1897 
 1898         /*
 1899          * Check the bus list generation.  If it has changed, the user
 1900          * needs to reset everything and start over.
 1901          */
 1902         xpt_lock_buses();
 1903         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1904          && (cdm->pos.cookie.bus != NULL)) {
 1905                 if (cdm->pos.generations[CAM_BUS_GENERATION] !=
 1906                     xsoftc.bus_generation) {
 1907                         xpt_unlock_buses();
 1908                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1909                         return(0);
 1910                 }
 1911                 bus = (struct cam_eb *)cdm->pos.cookie.bus;
 1912                 bus->refcount++;
 1913         } else
 1914                 bus = NULL;
 1915         xpt_unlock_buses();
 1916 
 1917         ret = xptbustraverse(bus, xptedtbusfunc, cdm);
 1918 
 1919         /*
 1920          * If we get back 0, that means that we had to stop before fully
 1921          * traversing the EDT.  It also means that one of the subroutines
 1922          * has set the status field to the proper value.  If we get back 1,
 1923          * we've fully traversed the EDT and copied out any matching entries.
 1924          */
 1925         if (ret == 1)
 1926                 cdm->status = CAM_DEV_MATCH_LAST;
 1927 
 1928         return(ret);
 1929 }
 1930 
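      /*
       * The xptplist*func() callbacks are the peripheral driver list
       * counterparts of the xptedt*func() callbacks above, used when a
       * match request walks peripherals by driver rather than by EDT
       * position.
       */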
 1931 static int
 1932 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 1933 {
 1934         struct cam_periph *periph;
 1935         struct ccb_dev_match *cdm;
 1936 
 1937         cdm = (struct ccb_dev_match *)arg;
 1938 
 1939         xpt_lock_buses();
 1940         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 1941          && (cdm->pos.cookie.pdrv == pdrv)
 1942          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 1943          && (cdm->pos.cookie.periph != NULL)) {
 1944                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 1945                     (*pdrv)->generation) {
 1946                         xpt_unlock_buses();
 1947                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 1948                         return(0);
 1949                 }
 1950                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
 1951                 periph->refcount++;
 1952         } else
 1953                 periph = NULL;
 1954         xpt_unlock_buses();
 1955 
 1956         return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
 1957 }
 1958 
 1959 static int
 1960 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 1961 {
 1962         struct ccb_dev_match *cdm;
 1963         dev_match_ret retval;
 1964 
 1965         cdm = (struct ccb_dev_match *)arg;
 1966 
 1967         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 1968 
 1969         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1970                 cdm->status = CAM_DEV_MATCH_ERROR;
 1971                 return(0);
 1972         }
 1973 
 1974         /*
 1975          * If the copy flag is set, copy this peripheral out.
 1976          */
 1977         if (retval & DM_RET_COPY) {
 1978                 int spaceleft, j;
 1979                 size_t l;
 1980 
 1981                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1982                         sizeof(struct dev_match_result));
 1983 
 1984                 /*
 1985                  * If we don't have enough space to put in another
 1986                  * match result, save our position and tell the
 1987                  * user there are more devices to check.
 1988                  */
 1989                 if (spaceleft < sizeof(struct dev_match_result)) {
 1990                         struct periph_driver **pdrv;
 1991 
 1992                         pdrv = NULL;
 1993                         bzero(&cdm->pos, sizeof(cdm->pos));
 1994                         cdm->pos.position_type =
 1995                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 1996                                 CAM_DEV_POS_PERIPH;
 1997 
 1998                         /*
 1999                          * This may look a bit nonsensical, but it is
 2000                          * actually quite logical.  There are very few
 2001                          * peripheral drivers, and bloating every peripheral
 2002                          * structure with a pointer back to its parent
 2003                          * peripheral driver linker set entry would cost
 2004                          * more in the long run than doing this quick lookup.
 2005                          */
 2006                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 2007                                 if (strcmp((*pdrv)->driver_name,
 2008                                     periph->periph_name) == 0)
 2009                                         break;
 2010                         }
 2011 
 2012                         if (*pdrv == NULL) {
 2013                                 cdm->status = CAM_DEV_MATCH_ERROR;
 2014                                 return(0);
 2015                         }
 2016 
 2017                         cdm->pos.cookie.pdrv = pdrv;
 2018                         /*
 2019                          * The periph generation slot does double duty, as
 2020                          * does the periph pointer slot.  They are used for
 2021                          * both edt and pdrv lookups and positioning.
 2022                          */
 2023                         cdm->pos.cookie.periph = periph;
 2024                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2025                                 (*pdrv)->generation;
 2026                         cdm->status = CAM_DEV_MATCH_MORE;
 2027                         return(0);
 2028                 }
 2029 
 2030                 j = cdm->num_matches;
 2031                 cdm->num_matches++;
 2032                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2033                 cdm->matches[j].result.periph_result.path_id =
 2034                         periph->path->bus->path_id;
 2035 
 2036                 /*
 2037                  * The transport layer peripheral doesn't have a target or
 2038                  * lun.
 2039                  */
 2040                 if (periph->path->target)
 2041                         cdm->matches[j].result.periph_result.target_id =
 2042                                 periph->path->target->target_id;
 2043                 else
 2044                         cdm->matches[j].result.periph_result.target_id =
 2045                                 CAM_TARGET_WILDCARD;
 2046 
 2047                 if (periph->path->device)
 2048                         cdm->matches[j].result.periph_result.target_lun =
 2049                                 periph->path->device->lun_id;
 2050                 else
 2051                         cdm->matches[j].result.periph_result.target_lun =
 2052                                 CAM_LUN_WILDCARD;
 2053 
 2054                 cdm->matches[j].result.periph_result.unit_number =
 2055                         periph->unit_number;
 2056                 l = sizeof(cdm->matches[j].result.periph_result.periph_name);
 2057                 strlcpy(cdm->matches[j].result.periph_result.periph_name,
 2058                         periph->periph_name, l);
 2059         }
 2060 
 2061         return(1);
 2062 }
 2063 
 2064 static int
 2065 xptperiphlistmatch(struct ccb_dev_match *cdm)
 2066 {
 2067         int ret;
 2068 
 2069         cdm->num_matches = 0;
 2070 
 2071         /*
 2072          * At the corresponding point in the EDT traversal function, we check
 2073          * the bus list generation to make sure that no busses have been added
 2074          * or removed since the user last sent an XPT_DEV_MATCH ccb through.
 2075          * For the peripheral driver list traversal function, however, we
 2076          * don't have to worry about new peripheral driver types coming or
 2077          * going; they're in a linker set, and therefore can't change
 2078          * without a recompile.
 2079          */
 2080 
 2081         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2082          && (cdm->pos.cookie.pdrv != NULL))
 2083                 ret = xptpdrvtraverse(
 2084                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2085                                 xptplistpdrvfunc, cdm);
 2086         else
 2087                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2088 
 2089         /*
 2090          * If we get back 0, that means that we had to stop before fully
 2091          * traversing the peripheral driver tree.  It also means that one of
 2092          * the subroutines has set the status field to the proper value.  If
 2093          * we get back 1, we've fully traversed the peripheral driver list
 2094          * and copied out any matching entries.
 2095          */
 2096         if (ret == 1)
 2097                 cdm->status = CAM_DEV_MATCH_LAST;
 2098 
 2099         return(ret);
 2100 }
 2101 
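      /*
       * The traversal functions below all follow the same reference
       * counting pattern: hold a reference on the current element while
       * the callback runs, then, under the list lock, take a reference
       * on the next element before releasing the current one.  This
       * keeps both elements from being freed out from under the walk.
       */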
 2102 static int
 2103 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2104 {
 2105         struct cam_eb *bus, *next_bus;
 2106         int retval;
 2107 
 2108         retval = 1;
 2109         if (start_bus)
 2110                 bus = start_bus;
 2111         else {
 2112                 xpt_lock_buses();
 2113                 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 2114                 if (bus == NULL) {
 2115                         xpt_unlock_buses();
 2116                         return (retval);
 2117                 }
 2118                 bus->refcount++;
 2119                 xpt_unlock_buses();
 2120         }
 2121         for (; bus != NULL; bus = next_bus) {
 2122                 retval = tr_func(bus, arg);
 2123                 if (retval == 0) {
 2124                         xpt_release_bus(bus);
 2125                         break;
 2126                 }
 2127                 xpt_lock_buses();
 2128                 next_bus = TAILQ_NEXT(bus, links);
 2129                 if (next_bus)
 2130                         next_bus->refcount++;
 2131                 xpt_unlock_buses();
 2132                 xpt_release_bus(bus);
 2133         }
 2134         return(retval);
 2135 }
 2136 
 2137 static int
 2138 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2139                   xpt_targetfunc_t *tr_func, void *arg)
 2140 {
 2141         struct cam_et *target, *next_target;
 2142         int retval;
 2143 
 2144         retval = 1;
 2145         if (start_target)
 2146                 target = start_target;
 2147         else {
 2148                 mtx_lock(&bus->eb_mtx);
 2149                 target = TAILQ_FIRST(&bus->et_entries);
 2150                 if (target == NULL) {
 2151                         mtx_unlock(&bus->eb_mtx);
 2152                         return (retval);
 2153                 }
 2154                 target->refcount++;
 2155                 mtx_unlock(&bus->eb_mtx);
 2156         }
 2157         for (; target != NULL; target = next_target) {
 2158                 retval = tr_func(target, arg);
 2159                 if (retval == 0) {
 2160                         xpt_release_target(target);
 2161                         break;
 2162                 }
 2163                 mtx_lock(&bus->eb_mtx);
 2164                 next_target = TAILQ_NEXT(target, links);
 2165                 if (next_target)
 2166                         next_target->refcount++;
 2167                 mtx_unlock(&bus->eb_mtx);
 2168                 xpt_release_target(target);
 2169         }
 2170         return(retval);
 2171 }
 2172 
 2173 static int
 2174 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2175                   xpt_devicefunc_t *tr_func, void *arg)
 2176 {
 2177         struct cam_eb *bus;
 2178         struct cam_ed *device, *next_device;
 2179         int retval;
 2180 
 2181         retval = 1;
 2182         bus = target->bus;
 2183         if (start_device)
 2184                 device = start_device;
 2185         else {
 2186                 mtx_lock(&bus->eb_mtx);
 2187                 device = TAILQ_FIRST(&target->ed_entries);
 2188                 if (device == NULL) {
 2189                         mtx_unlock(&bus->eb_mtx);
 2190                         return (retval);
 2191                 }
 2192                 device->refcount++;
 2193                 mtx_unlock(&bus->eb_mtx);
 2194         }
 2195         for (; device != NULL; device = next_device) {
 2196                 mtx_lock(&device->device_mtx);
 2197                 retval = tr_func(device, arg);
 2198                 mtx_unlock(&device->device_mtx);
 2199                 if (retval == 0) {
 2200                         xpt_release_device(device);
 2201                         break;
 2202                 }
 2203                 mtx_lock(&bus->eb_mtx);
 2204                 next_device = TAILQ_NEXT(device, links);
 2205                 if (next_device)
 2206                         next_device->refcount++;
 2207                 mtx_unlock(&bus->eb_mtx);
 2208                 xpt_release_device(device);
 2209         }
 2210         return(retval);
 2211 }
 2212 
 2213 static int
 2214 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2215                   xpt_periphfunc_t *tr_func, void *arg)
 2216 {
 2217         struct cam_eb *bus;
 2218         struct cam_periph *periph, *next_periph;
 2219         int retval;
 2220 
 2221         retval = 1;
 2222 
 2223         bus = device->target->bus;
 2224         if (start_periph)
 2225                 periph = start_periph;
 2226         else {
 2227                 xpt_lock_buses();
 2228                 mtx_lock(&bus->eb_mtx);
 2229                 periph = SLIST_FIRST(&device->periphs);
 2230                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
 2231                         periph = SLIST_NEXT(periph, periph_links);
 2232                 if (periph == NULL) {
 2233                         mtx_unlock(&bus->eb_mtx);
 2234                         xpt_unlock_buses();
 2235                         return (retval);
 2236                 }
 2237                 periph->refcount++;
 2238                 mtx_unlock(&bus->eb_mtx);
 2239                 xpt_unlock_buses();
 2240         }
 2241         for (; periph != NULL; periph = next_periph) {
 2242                 retval = tr_func(periph, arg);
 2243                 if (retval == 0) {
 2244                         cam_periph_release_locked(periph);
 2245                         break;
 2246                 }
 2247                 xpt_lock_buses();
 2248                 mtx_lock(&bus->eb_mtx);
 2249                 next_periph = SLIST_NEXT(periph, periph_links);
 2250                 while (next_periph != NULL &&
 2251                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
 2252                         next_periph = SLIST_NEXT(next_periph, periph_links);
 2253                 if (next_periph)
 2254                         next_periph->refcount++;
 2255                 mtx_unlock(&bus->eb_mtx);
 2256                 xpt_unlock_buses();
 2257                 cam_periph_release_locked(periph);
 2258         }
 2259         return(retval);
 2260 }
 2261 
 2262 static int
 2263 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2264                 xpt_pdrvfunc_t *tr_func, void *arg)
 2265 {
 2266         struct periph_driver **pdrv;
 2267         int retval;
 2268 
 2269         retval = 1;
 2270 
 2271         /*
 2272          * We don't traverse the peripheral driver list like we do the
 2273          * other lists, because it is a linker set, and therefore cannot be
 2274          * changed during runtime.  If the peripheral driver list is ever
 2275          * re-done to be something other than a linker set (i.e. it can
 2276          * change while the system is running), the list traversal should
 2277          * be modified to work like the other traversal functions.
 2278          */
 2279         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2280              *pdrv != NULL; pdrv++) {
 2281                 retval = tr_func(pdrv, arg);
 2282 
 2283                 if (retval == 0)
 2284                         return(retval);
 2285         }
 2286 
 2287         return(retval);
 2288 }
 2289 
 2290 static int
 2291 xptpdperiphtraverse(struct periph_driver **pdrv,
 2292                     struct cam_periph *start_periph,
 2293                     xpt_periphfunc_t *tr_func, void *arg)
 2294 {
 2295         struct cam_periph *periph, *next_periph;
 2296         int retval;
 2297 
 2298         retval = 1;
 2299 
 2300         if (start_periph)
 2301                 periph = start_periph;
 2302         else {
 2303                 xpt_lock_buses();
 2304                 periph = TAILQ_FIRST(&(*pdrv)->units);
 2305                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
 2306                         periph = TAILQ_NEXT(periph, unit_links);
 2307                 if (periph == NULL) {
 2308                         xpt_unlock_buses();
 2309                         return (retval);
 2310                 }
 2311                 periph->refcount++;
 2312                 xpt_unlock_buses();
 2313         }
 2314         for (; periph != NULL; periph = next_periph) {
 2315                 cam_periph_lock(periph);
 2316                 retval = tr_func(periph, arg);
 2317                 cam_periph_unlock(periph);
 2318                 if (retval == 0) {
 2319                         cam_periph_release(periph);
 2320                         break;
 2321                 }
 2322                 xpt_lock_buses();
 2323                 next_periph = TAILQ_NEXT(periph, unit_links);
 2324                 while (next_periph != NULL &&
 2325                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
 2326                         next_periph = TAILQ_NEXT(next_periph, unit_links);
 2327                 if (next_periph)
 2328                         next_periph->refcount++;
 2329                 xpt_unlock_buses();
 2330                 cam_periph_release(periph);
 2331         }
 2332         return(retval);
 2333 }
 2334 
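      /*
       * The xptdef*func() callbacks implement a generic depth-limited
       * walk of the EDT: each level either invokes the user's function
       * (when the requested depth has been reached) or recurses one
       * level further down.  They back the xpt_for_all_*() helpers
       * below.
       */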
 2335 static int
 2336 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2337 {
 2338         struct xpt_traverse_config *tr_config;
 2339 
 2340         tr_config = (struct xpt_traverse_config *)arg;
 2341 
 2342         if (tr_config->depth == XPT_DEPTH_BUS) {
 2343                 xpt_busfunc_t *tr_func;
 2344 
 2345                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2346 
 2347                 return(tr_func(bus, tr_config->tr_arg));
 2348         } else
 2349                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2350 }
 2351 
 2352 static int
 2353 xptdeftargetfunc(struct cam_et *target, void *arg)
 2354 {
 2355         struct xpt_traverse_config *tr_config;
 2356 
 2357         tr_config = (struct xpt_traverse_config *)arg;
 2358 
 2359         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2360                 xpt_targetfunc_t *tr_func;
 2361 
 2362                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2363 
 2364                 return(tr_func(target, tr_config->tr_arg));
 2365         } else
 2366                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2367 }
 2368 
 2369 static int
 2370 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2371 {
 2372         struct xpt_traverse_config *tr_config;
 2373 
 2374         tr_config = (struct xpt_traverse_config *)arg;
 2375 
 2376         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2377                 xpt_devicefunc_t *tr_func;
 2378 
 2379                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2380 
 2381                 return(tr_func(device, tr_config->tr_arg));
 2382         } else
 2383                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2384 }
 2385 
 2386 static int
 2387 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2388 {
 2389         struct xpt_traverse_config *tr_config;
 2390         xpt_periphfunc_t *tr_func;
 2391 
 2392         tr_config = (struct xpt_traverse_config *)arg;
 2393 
 2394         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2395 
 2396         /*
 2397          * Unlike the other default functions, we don't check for depth
 2398          * here.  The peripheral driver level is the last level in the EDT,
 2399          * so if we're here, we should execute the function in question.
 2400          */
 2401         return(tr_func(periph, tr_config->tr_arg));
 2402 }
 2403 
 2404 /*
 2405  * Execute the given function for every bus in the EDT.
 2406  */
 2407 static int
 2408 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2409 {
 2410         struct xpt_traverse_config tr_config;
 2411 
 2412         tr_config.depth = XPT_DEPTH_BUS;
 2413         tr_config.tr_func = tr_func;
 2414         tr_config.tr_arg = arg;
 2415 
 2416         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2417 }
 2418 
 2419 /*
 2420  * Execute the given function for every device in the EDT.
 2421  */
 2422 static int
 2423 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2424 {
 2425         struct xpt_traverse_config tr_config;
 2426 
 2427         tr_config.depth = XPT_DEPTH_DEVICE;
 2428         tr_config.tr_func = tr_func;
 2429         tr_config.tr_arg = arg;
 2430 
 2431         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2432 }
 2433 
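      /*
       * Announce an existing device to a newly registered async callback
       * by synthesizing an AC_FOUND_DEVICE event for it.
       */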
 2434 static int
 2435 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2436 {
 2437         struct cam_path path;
 2438         struct ccb_getdev cgd;
 2439         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2440 
 2441         /*
 2442          * Don't report unconfigured devices (Wildcard devs,
 2443          * devices only for target mode, device instances
 2444          * that have been invalidated but are waiting for
 2445          * their last reference count to be released).
 2446          */
 2447         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2448                 return (1);
 2449 
 2450         xpt_compile_path(&path,
 2451                          NULL,
 2452                          device->target->bus->path_id,
 2453                          device->target->target_id,
 2454                          device->lun_id);
 2455         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2456         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2457         xpt_action((union ccb *)&cgd);
 2458         csa->callback(csa->callback_arg,
 2459                             AC_FOUND_DEVICE,
 2460                             &path, &cgd);
 2461         xpt_release_path(&path);
 2462 
 2463         return(1);
 2464 }
 2465 
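      /*
       * Likewise, announce an existing bus to a newly registered async
       * callback via a synthesized AC_PATH_REGISTERED event.
       */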
 2466 static int
 2467 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2468 {
 2469         struct cam_path path;
 2470         struct ccb_pathinq cpi;
 2471         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
 2472 
 2473         xpt_compile_path(&path, /*periph*/NULL,
 2474                          bus->path_id,
 2475                          CAM_TARGET_WILDCARD,
 2476                          CAM_LUN_WILDCARD);
 2477         xpt_path_lock(&path);
 2478         xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
 2479         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2480         xpt_action((union ccb *)&cpi);
 2481         csa->callback(csa->callback_arg,
 2482                             AC_PATH_REGISTERED,
 2483                             &path, &cpi);
 2484         xpt_path_unlock(&path);
 2485         xpt_release_path(&path);
 2486 
 2487         return(1);
 2488 }
 2489 
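      /*
       * Begin processing a CCB: mark it in progress and dispatch it to
       * the action method of the transport bound to the request's bus.
       */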
 2490 void
 2491 xpt_action(union ccb *start_ccb)
 2492 {
 2493 
 2494         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 2495             ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code,
 2496                 xpt_action_name(start_ccb->ccb_h.func_code)));
 2497 
 2498         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2499         (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb);
 2500 }
 2501 
 2502 void
 2503 xpt_action_default(union ccb *start_ccb)
 2504 {
 2505         struct cam_path *path;
 2506         struct cam_sim *sim;
 2507         struct mtx *mtx;
 2508 
 2509         path = start_ccb->ccb_h.path;
 2510         CAM_DEBUG(path, CAM_DEBUG_TRACE,
 2511             ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code,
 2512                 xpt_action_name(start_ccb->ccb_h.func_code)));
 2513 
 2514         switch (start_ccb->ccb_h.func_code) {
 2515         case XPT_SCSI_IO:
 2516         {
 2517                 struct cam_ed *device;
 2518 
 2519                 /*
 2520                  * For the sake of compatibility with SCSI-1
 2521                  * devices that may not understand the identify
 2522                  * message, we include lun information in the
 2523                  * second byte of all commands.  SCSI-1 specifies
 2524                  * that luns are a 3-bit value and reserves only 3
 2525                  * bits for lun information in the CDB.  Later
 2526                  * revisions of the SCSI spec allow for more than 8
 2527                  * luns, but have deprecated lun information in the
 2528                  * CDB.  So, if the lun won't fit, we must omit it.
 2529                  *
 2530                  * Also be aware that during initial probing for devices,
 2531                  * the inquiry information is unknown but initialized to 0.
 2532                  * This means that this code will be exercised while probing
 2533                  * devices with an ANSI revision greater than 2.
 2534                  */
 2535                 device = path->device;
 2536                 if (device->protocol_version <= SCSI_REV_2
 2537                  && start_ccb->ccb_h.target_lun < 8
 2538                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 2539 
 2540                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 2541                             start_ccb->ccb_h.target_lun << 5;
 2542                 }
 2543                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 2544         }
 2545         /* FALLTHROUGH */
 2546         case XPT_TARGET_IO:
 2547         case XPT_CONT_TARGET_IO:
 2548                 start_ccb->csio.sense_resid = 0;
 2549                 start_ccb->csio.resid = 0;
 2550                 /* FALLTHROUGH */
 2551         case XPT_ATA_IO:
 2552                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
 2553                         start_ccb->ataio.resid = 0;
 2554                 /* FALLTHROUGH */
 2555         case XPT_NVME_IO:
 2556                 /* FALLTHROUGH */
 2557         case XPT_NVME_ADMIN:
 2558                 /* FALLTHROUGH */
 2559         case XPT_RESET_DEV:
 2560         case XPT_ENG_EXEC:
 2561         case XPT_SMP_IO:
 2562         {
 2563                 struct cam_devq *devq;
 2564 
 2565                 devq = path->bus->sim->devq;
 2566                 mtx_lock(&devq->send_mtx);
 2567                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 2568                 if (xpt_schedule_devq(devq, path->device) != 0)
 2569                         xpt_run_devq(devq);
 2570                 mtx_unlock(&devq->send_mtx);
 2571                 break;
 2572         }
 2573         case XPT_CALC_GEOMETRY:
 2574                 /* Filter out garbage */
 2575                 if (start_ccb->ccg.block_size == 0
 2576                  || start_ccb->ccg.volume_size == 0) {
 2577                         start_ccb->ccg.cylinders = 0;
 2578                         start_ccb->ccg.heads = 0;
 2579                         start_ccb->ccg.secs_per_track = 0;
 2580                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2581                         break;
 2582                 }
 2583 #if defined(PC98) || defined(__sparc64__)
 2584                 /*
 2585                  * In a PC-98 system, geometry translation depends on
 2586                  * the "real" device geometry obtained from mode page 4.
 2587                  * SCSI geometry translation is performed in the
 2588                  * initialization routine of the SCSI BIOS and the result
 2589                  * is stored in host memory.  If the translation is
 2590                  * available in host memory, use it.  If not, rely on the
 2591                  * default translation the device driver performs.
 2592                  * For sparc64, we may need to adjust the geometry of
 2593                  * large disks in order to fit the limitations of the
 2594                  * 16-bit fields of the VTOC8 disk label.
 2595                  */
 2596                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 2597                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2598                         break;
 2599                 }
 2600 #endif
 2601                 goto call_sim;
 2602         case XPT_ABORT:
 2603         {
 2604                 union ccb* abort_ccb;
 2605 
 2606                 abort_ccb = start_ccb->cab.abort_ccb;
 2607                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 2608                         struct cam_ed *device;
 2609                         struct cam_devq *devq;
 2610 
 2611                         device = abort_ccb->ccb_h.path->device;
 2612                         devq = device->sim->devq;
 2613 
 2614                         mtx_lock(&devq->send_mtx);
 2615                         if (abort_ccb->ccb_h.pinfo.index > 0) {
 2616                                 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb);
 2617                                 abort_ccb->ccb_h.status =
 2618                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2619                                 xpt_freeze_devq_device(device, 1);
 2620                                 mtx_unlock(&devq->send_mtx);
 2621                                 xpt_done(abort_ccb);
 2622                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2623                                 break;
 2624                         }
 2625                         mtx_unlock(&devq->send_mtx);
 2626 
 2627                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 2628                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 2629                                 /*
 2630                                  * We've caught this ccb en route to
 2631                                  * the SIM.  Flag it for abort and the
 2632                                  * SIM will do so just before starting
 2633                                  * real work on the CCB.
 2634                                  */
 2635                                 abort_ccb->ccb_h.status =
 2636                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2637                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2638                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2639                                 break;
 2640                         }
 2641                 }
 2642                 if (XPT_FC_IS_QUEUED(abort_ccb)
 2643                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 2644                         /*
 2645                          * It's already completed but waiting
 2646                          * for our SWI to get to it.
 2647                          */
 2648                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 2649                         break;
 2650                 }
 2651                 /*
 2652                  * If we weren't able to take care of the abort request
 2653                  * in the XPT, pass the request down to the SIM for processing.
 2654                  */
 2655         }
 2656         /* FALLTHROUGH */
 2657         case XPT_ACCEPT_TARGET_IO:
 2658         case XPT_EN_LUN:
 2659         case XPT_IMMED_NOTIFY:
 2660         case XPT_NOTIFY_ACK:
 2661         case XPT_RESET_BUS:
 2662         case XPT_IMMEDIATE_NOTIFY:
 2663         case XPT_NOTIFY_ACKNOWLEDGE:
 2664         case XPT_GET_SIM_KNOB_OLD:
 2665         case XPT_GET_SIM_KNOB:
 2666         case XPT_SET_SIM_KNOB:
 2667         case XPT_GET_TRAN_SETTINGS:
 2668         case XPT_SET_TRAN_SETTINGS:
 2669         case XPT_PATH_INQ:
 2670 call_sim:
 2671                 sim = path->bus->sim;
 2672                 mtx = sim->mtx;
 2673                 if (mtx && !mtx_owned(mtx))
 2674                         mtx_lock(mtx);
 2675                 else
 2676                         mtx = NULL;
 2677                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
 2678                     ("sim->sim_action: func=%#x\n", start_ccb->ccb_h.func_code));
 2679                 (*(sim->sim_action))(sim, start_ccb);
 2680                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
 2681                     ("sim->sim_action: status=%#x\n", start_ccb->ccb_h.status));
 2682                 if (mtx)
 2683                         mtx_unlock(mtx);
 2684                 break;
 2685         case XPT_PATH_STATS:
 2686                 start_ccb->cpis.last_reset = path->bus->last_reset;
 2687                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2688                 break;
 2689         case XPT_GDEV_TYPE:
 2690         {
 2691                 struct cam_ed *dev;
 2692 
 2693                 dev = path->device;
 2694                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2695                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2696                 } else {
 2697                         struct ccb_getdev *cgd;
 2698 
 2699                         cgd = &start_ccb->cgd;
 2700                         cgd->protocol = dev->protocol;
 2701                         cgd->inq_data = dev->inq_data;
 2702                         cgd->ident_data = dev->ident_data;
 2703                         cgd->inq_flags = dev->inq_flags;
 2704                         cgd->ccb_h.status = CAM_REQ_CMP;
 2705                         cgd->serial_num_len = dev->serial_num_len;
 2706                         if ((dev->serial_num_len > 0)
 2707                          && (dev->serial_num != NULL))
 2708                                 bcopy(dev->serial_num, cgd->serial_num,
 2709                                       dev->serial_num_len);
 2710                 }
 2711                 break;
 2712         }
 2713         case XPT_GDEV_STATS:
 2714         {
 2715                 struct ccb_getdevstats *cgds = &start_ccb->cgds;
 2716                 struct cam_ed *dev = path->device;
 2717                 struct cam_eb *bus = path->bus;
 2718                 struct cam_et *tar = path->target;
 2719                 struct cam_devq *devq = bus->sim->devq;
 2720 
 2721                 mtx_lock(&devq->send_mtx);
 2722                 cgds->dev_openings = dev->ccbq.dev_openings;
 2723                 cgds->dev_active = dev->ccbq.dev_active;
 2724                 cgds->allocated = dev->ccbq.allocated;
 2725                 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
 2726                 cgds->held = cgds->allocated - cgds->dev_active - cgds->queued;
 2727                 cgds->last_reset = tar->last_reset;
 2728                 cgds->maxtags = dev->maxtags;
 2729                 cgds->mintags = dev->mintags;
 2730                 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 2731                         cgds->last_reset = bus->last_reset;
 2732                 mtx_unlock(&devq->send_mtx);
 2733                 cgds->ccb_h.status = CAM_REQ_CMP;
 2734                 break;
 2735         }
 2736         case XPT_GDEVLIST:
 2737         {
 2738                 struct cam_periph       *nperiph;
 2739                 struct periph_list      *periph_head;
 2740                 struct ccb_getdevlist   *cgdl;
 2741                 u_int                   i;
 2742                 struct cam_ed           *device;
 2743                 int                     found;
 2744 
 2745 
 2746                 found = 0;
 2747 
 2748                 /*
 2749                  * Don't want anyone mucking with our data.
 2750                  */
 2751                 device = path->device;
 2752                 periph_head = &device->periphs;
 2753                 cgdl = &start_ccb->cgdl;
 2754 
 2755                 /*
 2756                  * Check and see if the list has changed since the user
 2757                  * last requested a list member.  If so, tell them that the
 2758                  * list has changed, and therefore they need to start over
 2759                  * from the beginning.
 2760                  */
 2761                 if ((cgdl->index != 0) &&
 2762                     (cgdl->generation != device->generation)) {
 2763                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 2764                         break;
 2765                 }
 2766 
 2767                 /*
 2768                  * Traverse the list of peripherals and attempt to find
 2769                  * the requested peripheral.
 2770                  */
 2771                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 2772                      (nperiph != NULL) && (i <= cgdl->index);
 2773                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 2774                         if (i == cgdl->index) {
 2775                                 strlcpy(cgdl->periph_name,
 2776                                         nperiph->periph_name,
 2777                                         sizeof(cgdl->periph_name));
 2778                                 cgdl->unit_number = nperiph->unit_number;
 2779                                 found = 1;
 2780                         }
 2781                 }
 2782                 if (found == 0) {
 2783                         cgdl->status = CAM_GDEVLIST_ERROR;
 2784                         break;
 2785                 }
 2786 
 2787                 if (nperiph == NULL)
 2788                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 2789                 else
 2790                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 2791 
 2792                 cgdl->index++;
 2793                 cgdl->generation = device->generation;
 2794 
 2795                 cgdl->ccb_h.status = CAM_REQ_CMP;
 2796                 break;
 2797         }
 2798         case XPT_DEV_MATCH:
 2799         {
 2800                 dev_pos_type position_type;
 2801                 struct ccb_dev_match *cdm;
 2802 
 2803                 cdm = &start_ccb->cdm;
 2804 
 2805                 /*
 2806                  * There are two ways of getting at information in the EDT.
 2807                  * The first way is via the primary EDT tree.  It starts
 2808                  * with a list of busses, then a list of targets on a bus,
 2809                  * then devices/luns on a target, and then peripherals on a
 2810                  * device/lun.  The "other" way is by the peripheral driver
 2811                  * lists.  The peripheral driver lists are organized by
 2812                  * lists.  The peripheral driver lists are (obviously)
 2813                  * organized by peripheral driver, so it makes sense to
 2814                  * for something like "da1", or all "da" devices.  If the
 2815                  * user is looking for something on a particular bus/target
 2816                  * or lun, it's generally better to go through the EDT tree.
 2817                  */
 2818 
 2819                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 2820                         position_type = cdm->pos.position_type;
 2821                 else {
 2822                         u_int i;
 2823 
 2824                         position_type = CAM_DEV_POS_NONE;
 2825 
 2826                         for (i = 0; i < cdm->num_patterns; i++) {
 2827                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 2828                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 2829                                         position_type = CAM_DEV_POS_EDT;
 2830                                         break;
 2831                                 }
 2832                         }
 2833 
 2834                         if (cdm->num_patterns == 0)
 2835                                 position_type = CAM_DEV_POS_EDT;
 2836                         else if (position_type == CAM_DEV_POS_NONE)
 2837                                 position_type = CAM_DEV_POS_PDRV;
 2838                 }
 2839 
 2840                 switch (position_type & CAM_DEV_POS_TYPEMASK) {
 2841                 case CAM_DEV_POS_EDT:
 2842                         xptedtmatch(cdm);
 2843                         break;
 2844                 case CAM_DEV_POS_PDRV:
 2845                         xptperiphlistmatch(cdm);
 2846                         break;
 2847                 default:
 2848                         cdm->status = CAM_DEV_MATCH_ERROR;
 2849                         break;
 2850                 }
 2851 
 2852                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 2853                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2854                 else
 2855                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2856 
 2857                 break;
 2858         }
 2859         case XPT_SASYNC_CB:
 2860         {
 2861                 struct ccb_setasync *csa;
 2862                 struct async_node *cur_entry;
 2863                 struct async_list *async_head;
 2864                 u_int32_t added;
 2865 
 2866                 csa = &start_ccb->csa;
 2867                 added = csa->event_enable;
 2868                 async_head = &path->device->asyncs;
 2869 
 2870                 /*
 2871                  * If there is already an entry for us, simply
 2872                  * update it.
 2873                  */
 2874                 cur_entry = SLIST_FIRST(async_head);
 2875                 while (cur_entry != NULL) {
 2876                         if ((cur_entry->callback_arg == csa->callback_arg)
 2877                          && (cur_entry->callback == csa->callback))
 2878                                 break;
 2879                         cur_entry = SLIST_NEXT(cur_entry, links);
 2880                 }
 2881 
 2882                 if (cur_entry != NULL) {
 2883                         /*
 2884                          * If the request has no flags set,
 2885                          * remove the entry.
 2886                          */
 2887                         added &= ~cur_entry->event_enable;
 2888                         if (csa->event_enable == 0) {
 2889                                 SLIST_REMOVE(async_head, cur_entry,
 2890                                              async_node, links);
 2891                                 xpt_release_device(path->device);
 2892                                 free(cur_entry, M_CAMXPT);
 2893                         } else {
 2894                                 cur_entry->event_enable = csa->event_enable;
 2895                         }
 2896                         csa->event_enable = added;
 2897                 } else {
 2898                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 2899                                            M_NOWAIT);
 2900                         if (cur_entry == NULL) {
 2901                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 2902                                 break;
 2903                         }
 2904                         cur_entry->event_enable = csa->event_enable;
 2905                         cur_entry->event_lock = (path->bus->sim->mtx &&
 2906                             mtx_owned(path->bus->sim->mtx)) ? 1 : 0;
 2907                         cur_entry->callback_arg = csa->callback_arg;
 2908                         cur_entry->callback = csa->callback;
 2909                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 2910                         xpt_acquire_device(path->device);
 2911                 }
 2912                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2913                 break;
 2914         }
 2915         case XPT_REL_SIMQ:
 2916         {
 2917                 struct ccb_relsim *crs;
 2918                 struct cam_ed *dev;
 2919 
 2920                 crs = &start_ccb->crs;
 2921                 dev = path->device;
 2922                 if (dev == NULL) {
 2923 
 2924                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 2925                         break;
 2926                 }
 2927 
 2928                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 2929 
 2930                         /* Don't ever go below one opening */
 2931                         if (crs->openings > 0) {
 2932                                 xpt_dev_ccbq_resize(path, crs->openings);
 2933                                 if (bootverbose) {
 2934                                         xpt_print(path,
 2935                                             "number of openings is now %d\n",
 2936                                             crs->openings);
 2937                                 }
 2938                         }
 2939                 }
 2940 
 2941                 mtx_lock(&dev->sim->devq->send_mtx);
 2942                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 2943 
 2944                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 2945 
 2946                                 /*
 2947                                  * Just extend the old timeout and decrement
 2948                                  * the freeze count so that a single timeout
 2949                                  * is sufficient for releasing the queue.
 2950                                  */
 2951                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2952                                 callout_stop(&dev->callout);
 2953                         } else {
 2954 
 2955                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2956                         }
 2957 
 2958                         callout_reset_sbt(&dev->callout,
 2959                             SBT_1MS * crs->release_timeout, 0,
 2960                             xpt_release_devq_timeout, dev, 0);
 2961 
 2962                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 2963 
 2964                 }
 2965 
 2966                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 2967 
 2968                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 2969                                 /*
 2970                                  * Decrement the freeze count so that a single
 2971                                  * completion is still sufficient to unfreeze
 2972                                  * the queue.
 2973                                  */
 2974                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2975                         } else {
 2976 
 2977                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 2978                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2979                         }
 2980                 }
 2981 
 2982                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 2983 
 2984                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 2985                          || (dev->ccbq.dev_active == 0)) {
 2986 
 2987                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 2988                         } else {
 2989 
 2990                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 2991                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 2992                         }
 2993                 }
 2994                 mtx_unlock(&dev->sim->devq->send_mtx);
 2995 
 2996                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
 2997                         xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
 2998                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
 2999                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3000                 break;
 3001         }
 3002         case XPT_DEBUG: {
 3003                 struct cam_path *oldpath;
 3004 
 3005                 /* Check that all request bits are supported. */
 3006                 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
 3007                         start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 3008                         break;
 3009                 }
 3010 
 3011                 cam_dflags = CAM_DEBUG_NONE;
 3012                 if (cam_dpath != NULL) {
 3013                         oldpath = cam_dpath;
 3014                         cam_dpath = NULL;
 3015                         xpt_free_path(oldpath);
 3016                 }
 3017                 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
 3018                         if (xpt_create_path(&cam_dpath, NULL,
 3019                                             start_ccb->ccb_h.path_id,
 3020                                             start_ccb->ccb_h.target_id,
 3021                                             start_ccb->ccb_h.target_lun) !=
 3022                                             CAM_REQ_CMP) {
 3023                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3024                         } else {
 3025                                 cam_dflags = start_ccb->cdbg.flags;
 3026                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3027                                 xpt_print(cam_dpath, "debugging flags now %x\n",
 3028                                     cam_dflags);
 3029                         }
 3030                 } else
 3031                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3032                 break;
 3033         }
 3034         case XPT_NOOP:
 3035                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 3036                         xpt_freeze_devq(path, 1);
 3037                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3038                 break;
 3039         case XPT_REPROBE_LUN:
 3040                 xpt_async(AC_INQ_CHANGED, path, NULL);
 3041                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3042                 xpt_done(start_ccb);
 3043                 break;
 3044         default:
 3045         case XPT_SDEV_TYPE:
 3046         case XPT_TERM_IO:
 3047         case XPT_ENG_INQ:
 3048                 /* XXX Implement */
 3049                 xpt_print_path(start_ccb->ccb_h.path);
 3050                 printf("%s: CCB type %#x %s not supported\n", __func__,
 3051                     start_ccb->ccb_h.func_code,
 3052                     xpt_action_name(start_ccb->ccb_h.func_code));
 3053                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 3054                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
 3055                         xpt_done(start_ccb);
 3056                 }
 3057                 break;
 3058         }
 3059         CAM_DEBUG(path, CAM_DEBUG_TRACE,
 3060             ("xpt_action_default: func= %#x %s status %#x\n",
 3061                 start_ccb->ccb_h.func_code,
 3062                 xpt_action_name(start_ccb->ccb_h.func_code),
 3063                 start_ccb->ccb_h.status));
 3064 }
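
/*
 * Example (sketch, not part of this file): several of the CCBs handled
 * above, such as XPT_GDEV_TYPE, are serviced entirely within
 * xpt_action_default() and complete synchronously.  A hypothetical
 * caller, holding the path lock, might look like this.
 */
#if 0
static int
example_get_dev_type(struct cam_path *path, struct ccb_getdev *cgd)
{

        xpt_setup_ccb(&cgd->ccb_h, path, CAM_PRIORITY_NORMAL);
        cgd->ccb_h.func_code = XPT_GDEV_TYPE;
        xpt_action((union ccb *)cgd);
        return (cgd->ccb_h.status == CAM_REQ_CMP ? 0 : ENXIO);
}
#endif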
 3065 
 3066 void
 3067 xpt_polled_action(union ccb *start_ccb)
 3068 {
 3069         u_int32_t timeout;
 3070         struct    cam_sim *sim;
 3071         struct    cam_devq *devq;
 3072         struct    cam_ed *dev;
 3073         struct mtx *mtx;
 3074 
 3075         timeout = start_ccb->ccb_h.timeout * 10;
 3076         sim = start_ccb->ccb_h.path->bus->sim;
 3077         devq = sim->devq;
 3078         mtx = sim->mtx;
 3079         dev = start_ccb->ccb_h.path->device;
 3080 
 3081         mtx_unlock(&dev->device_mtx);
 3082 
 3083         /*
 3084          * Steal an opening so that no other queued requests
 3085          * can get it before us while we simulate interrupts.
 3086          */
 3087         mtx_lock(&devq->send_mtx);
 3088         dev->ccbq.dev_openings--;
 3089         while ((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
 3090             (--timeout > 0)) {
 3091                 mtx_unlock(&devq->send_mtx);
 3092                 DELAY(100);
 3093                 if (mtx)
 3094                         mtx_lock(mtx);
 3095                 (*(sim->sim_poll))(sim);
 3096                 if (mtx)
 3097                         mtx_unlock(mtx);
 3098                 camisr_runqueue();
 3099                 mtx_lock(&devq->send_mtx);
 3100         }
 3101         dev->ccbq.dev_openings++;
 3102         mtx_unlock(&devq->send_mtx);
 3103 
 3104         if (timeout != 0) {
 3105                 xpt_action(start_ccb);
 3106                 while (--timeout > 0) {
 3107                         if (mtx)
 3108                                 mtx_lock(mtx);
 3109                         (*(sim->sim_poll))(sim);
 3110                         if (mtx)
 3111                                 mtx_unlock(mtx);
 3112                         camisr_runqueue();
 3113                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 3114                             != CAM_REQ_INPROG)
 3115                                 break;
 3116                         DELAY(100);
 3117                 }
 3118                 if (timeout == 0) {
 3119                         /*
 3120                          * XXX Is it worth adding a sim_timeout entry
 3121                          * point so we can attempt recovery?  If
 3122                          * this is only used for dumps, I don't think
 3123                          * it is.
 3124                          */
 3125                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3126                 }
 3127         } else {
 3128                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3129         }
 3130 
 3131         mtx_lock(&dev->device_mtx);
 3132 }
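
/*
 * Example (sketch, not part of this file): crash-dump paths use
 * xpt_polled_action() because interrupts are unavailable; the caller
 * holds the device (periph) lock, as the mtx_unlock() of device_mtx
 * above implies.  example_polled_tur and example_polled_done are
 * hypothetical, and the scsi_*() builder, MSG_SIMPLE_Q_TAG, and
 * SSD_FULL_SIZE would require cam/scsi/scsi_all.h and
 * cam/scsi/scsi_message.h, which this file does not include.
 */
#if 0
static void
example_polled_done(struct cam_periph *periph, union ccb *done_ccb)
{
}

static int
example_polled_tur(struct cam_periph *periph)
{
        struct ccb_scsiio csio;

        memset(&csio, 0, sizeof(csio));
        xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
        scsi_test_unit_ready(&csio, /*retries*/0, example_polled_done,
            MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/5000);
        xpt_polled_action((union ccb *)&csio);
        return ((csio.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP ?
            0 : EIO);
}
#endif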
 3133 
 3134 /*
 3135  * Schedule a peripheral driver to receive a ccb when its
 3136  * target device has space for more transactions.
 3137  */
 3138 void
 3139 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
 3140 {
 3141 
 3142         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3143         cam_periph_assert(periph, MA_OWNED);
 3144         if (new_priority < periph->scheduled_priority) {
 3145                 periph->scheduled_priority = new_priority;
 3146                 xpt_run_allocq(periph, 0);
 3147         }
 3148 }
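
/*
 * Example (sketch, not part of this file): the producer side of the
 * allocation queue.  A peripheral's strategy routine queues work and
 * calls xpt_schedule(); xpt_run_allocq() later hands a CCB to the
 * driver's periph_start() method.  foo_softc, foo_strategy, and the
 * bio_queue field are hypothetical; the fragment assumes sys/bio.h
 * and the disk(9) d_drv1 convention used by da(4)-style drivers.
 */
#if 0
static void
foo_strategy(struct bio *bp)
{
        struct cam_periph *periph;
        struct foo_softc *softc;

        periph = (struct cam_periph *)bp->bio_disk->d_drv1;
        softc = (struct foo_softc *)periph->softc;

        cam_periph_lock(periph);
        bioq_disksort(&softc->bio_queue, bp);      /* remember the work */
        xpt_schedule(periph, CAM_PRIORITY_NORMAL); /* ask for a CCB */
        cam_periph_unlock(periph);
}
#endif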
 3149 
 3150 
 3151 /*
 3152  * Schedule a device to run on a given queue.
 3153  * If the device was inserted as a new entry on the queue,
 3154  * return 1 meaning the device queue should be run. If we
 3155  * were already queued, implying someone else has already
 3156  * started the queue, return 0 so the caller doesn't attempt
 3157  * to run the queue.
 3158  */
 3159 static int
 3160 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3161                  u_int32_t new_priority)
 3162 {
 3163         int retval;
 3164         u_int32_t old_priority;
 3165 
 3166         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3167 
 3168         old_priority = pinfo->priority;
 3169 
 3170         /*
 3171          * Are we already queued?
 3172          */
 3173         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3174                 /* Simply reorder based on new priority */
 3175                 if (new_priority < old_priority) {
 3176                         camq_change_priority(queue, pinfo->index,
 3177                                              new_priority);
 3178                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3179                                         ("changed priority to %d\n",
 3180                                          new_priority));
 3181                         retval = 1;
 3182                 } else
 3183                         retval = 0;
 3184         } else {
 3185                 /* New entry on the queue */
 3186                 if (new_priority < old_priority)
 3187                         pinfo->priority = new_priority;
 3188 
 3189                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3190                                 ("Inserting onto queue\n"));
 3191                 pinfo->generation = ++queue->generation;
 3192                 camq_insert(queue, pinfo);
 3193                 retval = 1;
 3194         }
 3195         return (retval);
 3196 }
 3197 
 3198 static void
 3199 xpt_run_allocq_task(void *context, int pending)
 3200 {
 3201         struct cam_periph *periph = context;
 3202 
 3203         cam_periph_lock(periph);
 3204         periph->flags &= ~CAM_PERIPH_RUN_TASK;
 3205         xpt_run_allocq(periph, 1);
 3206         cam_periph_unlock(periph);
 3207         cam_periph_release(periph);
 3208 }
 3209 
 3210 static void
 3211 xpt_run_allocq(struct cam_periph *periph, int sleep)
 3212 {
 3213         struct cam_ed   *device;
 3214         union ccb       *ccb;
 3215         uint32_t         prio;
 3216 
 3217         cam_periph_assert(periph, MA_OWNED);
 3218         if (periph->periph_allocating)
 3219                 return;
 3220         periph->periph_allocating = 1;
 3221         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
 3222         device = periph->path->device;
 3223         ccb = NULL;
 3224 restart:
 3225         while ((prio = min(periph->scheduled_priority,
 3226             periph->immediate_priority)) != CAM_PRIORITY_NONE &&
 3227             (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
 3228              device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
 3229 
 3230                 if (ccb == NULL &&
 3231                     (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
 3232                         if (sleep) {
 3233                                 ccb = xpt_get_ccb(periph);
 3234                                 goto restart;
 3235                         }
 3236                         if (periph->flags & CAM_PERIPH_RUN_TASK)
 3237                                 break;
 3238                         cam_periph_doacquire(periph);
 3239                         periph->flags |= CAM_PERIPH_RUN_TASK;
 3240                         taskqueue_enqueue(xsoftc.xpt_taskq,
 3241                             &periph->periph_run_task);
 3242                         break;
 3243                 }
 3244                 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
 3245                 if (prio == periph->immediate_priority) {
 3246                         periph->immediate_priority = CAM_PRIORITY_NONE;
 3247                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3248                                         ("waking cam_periph_getccb()\n"));
 3249                         SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
 3250                                           periph_links.sle);
 3251                         wakeup(&periph->ccb_list);
 3252                 } else {
 3253                         periph->scheduled_priority = CAM_PRIORITY_NONE;
 3254                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3255                                         ("calling periph_start()\n"));
 3256                         periph->periph_start(periph, ccb);
 3257                 }
 3258                 ccb = NULL;
 3259         }
 3260         if (ccb != NULL)
 3261                 xpt_release_ccb(ccb);
 3262         periph->periph_allocating = 0;
 3263 }
 3264 
 3265 static void
 3266 xpt_run_devq(struct cam_devq *devq)
 3267 {
 3268         struct mtx *mtx;
 3269 
 3270         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
 3271 
 3272         devq->send_queue.qfrozen_cnt++;
 3273         while ((devq->send_queue.entries > 0)
 3274             && (devq->send_openings > 0)
 3275             && (devq->send_queue.qfrozen_cnt <= 1)) {
 3276                 struct  cam_ed *device;
 3277                 union ccb *work_ccb;
 3278                 struct  cam_sim *sim;
 3279                 struct xpt_proto *proto;
 3280 
 3281                 device = (struct cam_ed *)camq_remove(&devq->send_queue,
 3282                                                            CAMQ_HEAD);
 3283                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3284                                 ("running device %p\n", device));
 3285 
 3286                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3287                 if (work_ccb == NULL) {
 3288                         printf("device on run queue with no ccbs???\n");
 3289                         continue;
 3290                 }
 3291 
 3292                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3293 
 3294                         mtx_lock(&xsoftc.xpt_highpower_lock);
 3295                         if (xsoftc.num_highpower <= 0) {
 3296                                 /*
 3297                                  * We got a high power command, but we
 3298                                  * don't have any available slots.  Freeze
 3299                                  * the device queue until we have a slot
 3300                                  * available.
 3301                                  */
 3302                                 xpt_freeze_devq_device(device, 1);
 3303                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
 3304                                                    highpowerq_entry);
 3305 
 3306                                 mtx_unlock(&xsoftc.xpt_highpower_lock);
 3307                                 continue;
 3308                         } else {
 3309                                 /*
 3310                                  * Consume a high power slot while
 3311                                  * this ccb runs.
 3312                                  */
 3313                                 xsoftc.num_highpower--;
 3314                         }
 3315                         mtx_unlock(&xsoftc.xpt_highpower_lock);
 3316                 }
 3317                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3318                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3319                 devq->send_openings--;
 3320                 devq->send_active++;
 3321                 xpt_schedule_devq(devq, device);
 3322                 mtx_unlock(&devq->send_mtx);
 3323 
 3324                 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
 3325                         /*
 3326                          * The client wants to freeze the queue
 3327                          * after this CCB is sent.
 3328                          */
 3329                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
 3330                 }
 3331 
 3332                 /* In Target mode, the peripheral driver knows best... */
 3333                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3334                         if ((device->inq_flags & SID_CmdQue) != 0
 3335                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3336                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3337                         else
 3338                                 /*
 3339                                  * Clear this in case of a retried CCB that
 3340                                  * failed due to a rejected tag.
 3341                                  */
 3342                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3343                 }
 3344 
 3345                 KASSERT(device == work_ccb->ccb_h.path->device,
 3346                     ("device (%p) / path->device (%p) mismatch",
 3347                         device, work_ccb->ccb_h.path->device));
 3348                 proto = xpt_proto_find(device->protocol);
 3349                 if (proto && proto->ops->debug_out)
 3350                         proto->ops->debug_out(work_ccb);
 3351 
 3352                 /*
 3353                  * Device queues can be shared among multiple SIM instances
 3354                  * that reside on different busses.  Use the SIM from the
 3355                  * queued device, rather than the one from the calling bus.
 3356                  */
 3357                 sim = device->sim;
 3358                 mtx = sim->mtx;
 3359                 if (mtx && !mtx_owned(mtx))
 3360                         mtx_lock(mtx);
 3361                 else
 3362                         mtx = NULL;
 3363                 work_ccb->ccb_h.qos.sim_data = sbinuptime(); /* XXX uintptr_t too small on 32-bit platforms */
 3364                 (*(sim->sim_action))(sim, work_ccb);
 3365                 if (mtx)
 3366                         mtx_unlock(mtx);
 3367                 mtx_lock(&devq->send_mtx);
 3368         }
 3369         devq->send_queue.qfrozen_cnt--;
 3370 }
 3371 
 3372 /*
 3373  * This function merges the request contents of the slave ccb into the
 3374  * master ccb, while keeping important fields in the master ccb constant.
 3375  */
 3376 void
 3377 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3378 {
 3379 
 3380         /*
 3381          * Pull fields that are valid for peripheral drivers to set
 3382          * into the master CCB along with the CCB "payload".
 3383          */
 3384         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3385         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3386         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3387         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 3388         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3389               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3390 }
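
/*
 * Example (sketch, not part of this file): a driver holding the periph
 * lock can build a request in a scratch CCB and merge it into a CCB
 * obtained from the device's pool; the master keeps its path and queue
 * bookkeeping while taking on the new payload.  This fragment assumes
 * a 'periph' in scope and a scsi_*()/ata_*() builder filling 'scratch'.
 */
#if 0
        union ccb *master;
        union ccb scratch;

        master = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
        memset(&scratch, 0, sizeof(scratch));
        xpt_setup_ccb(&scratch.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
        /* ... fill in scratch here ... */
        xpt_merge_ccb(master, &scratch);
#endif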
 3391 
 3392 void
 3393 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
 3394                     u_int32_t priority, u_int32_t flags)
 3395 {
 3396 
 3397         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3398         ccb_h->pinfo.priority = priority;
 3399         ccb_h->path = path;
 3400         ccb_h->path_id = path->bus->path_id;
 3401         if (path->target)
 3402                 ccb_h->target_id = path->target->target_id;
 3403         else
 3404                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3405         if (path->device) {
 3406                 ccb_h->target_lun = path->device->lun_id;
 3407                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3408         } else {
 3409                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3410         }
 3411         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3412         ccb_h->flags = flags;
 3413         ccb_h->xflags = 0;
 3414 }
 3415 
 3416 void
 3417 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3418 {
 3419         xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
 3420 }
 3421 
 3422 /* Path manipulation functions */
 3423 cam_status
 3424 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3425                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3426 {
 3427         struct     cam_path *path;
 3428         cam_status status;
 3429 
 3430         path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
 3431 
 3432         if (path == NULL) {
 3433                 status = CAM_RESRC_UNAVAIL;
 3434                 return(status);
 3435         }
 3436         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3437         if (status != CAM_REQ_CMP) {
 3438                 free(path, M_CAMPATH);
 3439                 path = NULL;
 3440         }
 3441         *new_path_ptr = path;
 3442         return (status);
 3443 }
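
/*
 * Example (sketch, not part of this file): typical lifetime of a path
 * created outside a peripheral.  example_kick_reprobe is a hypothetical
 * name; AC_INQ_CHANGED is the same async code XPT_REPROBE_LUN posts
 * above.
 */
#if 0
static int
example_kick_reprobe(path_id_t bus, target_id_t target, lun_id_t lun)
{
        struct cam_path *path;

        if (xpt_create_path(&path, /*periph*/NULL, bus, target, lun) !=
            CAM_REQ_CMP)
                return (ENOMEM);
        xpt_path_lock(path);
        xpt_async(AC_INQ_CHANGED, path, NULL);
        xpt_path_unlock(path);
        xpt_free_path(path);
        return (0);
}
#endif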
 3444 
 3445 cam_status
 3446 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
 3447                          struct cam_periph *periph, path_id_t path_id,
 3448                          target_id_t target_id, lun_id_t lun_id)
 3449 {
 3450 
 3451         return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
 3452             lun_id));
 3453 }
 3454 
 3455 cam_status
 3456 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 3457                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3458 {
 3459         struct       cam_eb *bus;
 3460         struct       cam_et *target;
 3461         struct       cam_ed *device;
 3462         cam_status   status;
 3463 
 3464         status = CAM_REQ_CMP;   /* Completed without error */
 3465         target = NULL;          /* Wildcarded */
 3466         device = NULL;          /* Wildcarded */
 3467 
 3468         /*
 3469          * We will potentially modify the EDT, so take the bus locks
 3470          * to keep others from creating cam paths behind our back.
 3471          */
 3472         bus = xpt_find_bus(path_id);
 3473         if (bus == NULL) {
 3474                 status = CAM_PATH_INVALID;
 3475         } else {
 3476                 xpt_lock_buses();
 3477                 mtx_lock(&bus->eb_mtx);
 3478                 target = xpt_find_target(bus, target_id);
 3479                 if (target == NULL) {
 3480                         /* Create one */
 3481                         struct cam_et *new_target;
 3482 
 3483                         new_target = xpt_alloc_target(bus, target_id);
 3484                         if (new_target == NULL) {
 3485                                 status = CAM_RESRC_UNAVAIL;
 3486                         } else {
 3487                                 target = new_target;
 3488                         }
 3489                 }
 3490                 xpt_unlock_buses();
 3491                 if (target != NULL) {
 3492                         device = xpt_find_device(target, lun_id);
 3493                         if (device == NULL) {
 3494                                 /* Create one */
 3495                                 struct cam_ed *new_device;
 3496 
 3497                                 new_device =
 3498                                     (*(bus->xport->ops->alloc_device))(bus,
 3499                                                                        target,
 3500                                                                        lun_id);
 3501                                 if (new_device == NULL) {
 3502                                         status = CAM_RESRC_UNAVAIL;
 3503                                 } else {
 3504                                         device = new_device;
 3505                                 }
 3506                         }
 3507                 }
 3508                 mtx_unlock(&bus->eb_mtx);
 3509         }
 3510 
 3511         /*
 3512          * Only touch the user's data if we are successful.
 3513          */
 3514         if (status == CAM_REQ_CMP) {
 3515                 new_path->periph = perph;
 3516                 new_path->bus = bus;
 3517                 new_path->target = target;
 3518                 new_path->device = device;
 3519                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 3520         } else {
 3521                 if (device != NULL)
 3522                         xpt_release_device(device);
 3523                 if (target != NULL)
 3524                         xpt_release_target(target);
 3525                 if (bus != NULL)
 3526                         xpt_release_bus(bus);
 3527         }
 3528         return (status);
 3529 }
 3530 
 3531 cam_status
 3532 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
 3533 {
 3534         struct     cam_path *new_path;
 3535 
 3536         new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
 3537         if (new_path == NULL)
 3538                 return(CAM_RESRC_UNAVAIL);
 3539         xpt_copy_path(new_path, path);
 3540         *new_path_ptr = new_path;
 3541         return (CAM_REQ_CMP);
 3542 }
 3543 
 3544 void
 3545 xpt_copy_path(struct cam_path *new_path, struct cam_path *path)
 3546 {
 3547 
 3548         *new_path = *path;
 3549         if (path->bus != NULL)
 3550                 xpt_acquire_bus(path->bus);
 3551         if (path->target != NULL)
 3552                 xpt_acquire_target(path->target);
 3553         if (path->device != NULL)
 3554                 xpt_acquire_device(path->device);
 3555 }
 3556 
 3557 void
 3558 xpt_release_path(struct cam_path *path)
 3559 {
 3560         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 3561         if (path->device != NULL) {
 3562                 xpt_release_device(path->device);
 3563                 path->device = NULL;
 3564         }
 3565         if (path->target != NULL) {
 3566                 xpt_release_target(path->target);
 3567                 path->target = NULL;
 3568         }
 3569         if (path->bus != NULL) {
 3570                 xpt_release_bus(path->bus);
 3571                 path->bus = NULL;
 3572         }
 3573 }
 3574 
 3575 void
 3576 xpt_free_path(struct cam_path *path)
 3577 {
 3578 
 3579         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 3580         xpt_release_path(path);
 3581         free(path, M_CAMPATH);
 3582 }
 3583 
 3584 void
 3585 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
 3586     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
 3587 {
 3588 
 3589         xpt_lock_buses();
 3590         if (bus_ref) {
 3591                 if (path->bus)
 3592                         *bus_ref = path->bus->refcount;
 3593                 else
 3594                         *bus_ref = 0;
 3595         }
 3596         if (periph_ref) {
 3597                 if (path->periph)
 3598                         *periph_ref = path->periph->refcount;
 3599                 else
 3600                         *periph_ref = 0;
 3601         }
 3602         xpt_unlock_buses();
 3603         if (target_ref) {
 3604                 if (path->target)
 3605                         *target_ref = path->target->refcount;
 3606                 else
 3607                         *target_ref = 0;
 3608         }
 3609         if (device_ref) {
 3610                 if (path->device)
 3611                         *device_ref = path->device->refcount;
 3612                 else
 3613                         *device_ref = 0;
 3614         }
 3615 }
 3616 
 3617 /*
 3618  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 3619  * in path1, 2 for match with wildcards in path2.
 3620  */
 3621 int
 3622 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 3623 {
 3624         int retval = 0;
 3625 
 3626         if (path1->bus != path2->bus) {
 3627                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 3628                         retval = 1;
 3629                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 3630                         retval = 2;
 3631                 else
 3632                         return (-1);
 3633         }
 3634         if (path1->target != path2->target) {
 3635                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 3636                         if (retval == 0)
 3637                                 retval = 1;
 3638                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 3639                         retval = 2;
 3640                 else
 3641                         return (-1);
 3642         }
 3643         if (path1->device != path2->device) {
 3644                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 3645                         if (retval == 0)
 3646                                 retval = 1;
 3647                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 3648                         retval = 2;
 3649                 else
 3650                         return (-1);
 3651         }
 3652         return (retval);
 3653 }
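
/*
 * Example (sketch, not part of this file): callers generally only care
 * whether the two paths overlap at all, so any non-negative return
 * counts as a match.  example_path_matches is a hypothetical wrapper.
 */
#if 0
static int
example_path_matches(struct cam_path *p1, struct cam_path *p2)
{

        /* -1: disjoint; 0: exact; 1 or 2: match via wildcards */
        return (xpt_path_comp(p1, p2) >= 0);
}
#endif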
 3654 
 3655 int
 3656 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
 3657 {
 3658         int retval = 0;
 3659 
 3660         if (path->bus != dev->target->bus) {
 3661                 if (path->bus->path_id == CAM_BUS_WILDCARD)
 3662                         retval = 1;
 3663                 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
 3664                         retval = 2;
 3665                 else
 3666                         return (-1);
 3667         }
 3668         if (path->target != dev->target) {
 3669                 if (path->target->target_id == CAM_TARGET_WILDCARD) {
 3670                         if (retval == 0)
 3671                                 retval = 1;
 3672                 } else if (dev->target->target_id == CAM_TARGET_WILDCARD)
 3673                         retval = 2;
 3674                 else
 3675                         return (-1);
 3676         }
 3677         if (path->device != dev) {
 3678                 if (path->device->lun_id == CAM_LUN_WILDCARD) {
 3679                         if (retval == 0)
 3680                                 retval = 1;
 3681                 } else if (dev->lun_id == CAM_LUN_WILDCARD)
 3682                         retval = 2;
 3683                 else
 3684                         return (-1);
 3685         }
 3686         return (retval);
 3687 }
 3688 
 3689 void
 3690 xpt_print_path(struct cam_path *path)
 3691 {
 3692 
 3693         if (path == NULL)
 3694                 printf("(nopath): ");
 3695         else {
 3696                 if (path->periph != NULL)
 3697                         printf("(%s%d:", path->periph->periph_name,
 3698                                path->periph->unit_number);
 3699                 else
 3700                         printf("(noperiph:");
 3701 
 3702                 if (path->bus != NULL)
 3703                         printf("%s%d:%d:", path->bus->sim->sim_name,
 3704                                path->bus->sim->unit_number,
 3705                                path->bus->sim->bus_id);
 3706                 else
 3707                         printf("nobus:");
 3708 
 3709                 if (path->target != NULL)
 3710                         printf("%d:", path->target->target_id);
 3711                 else
 3712                         printf("X:");
 3713 
 3714                 if (path->device != NULL)
 3715                         printf("%jx): ", (uintmax_t)path->device->lun_id);
 3716                 else
 3717                         printf("X): ");
 3718         }
 3719 }
 3720 
 3721 void
 3722 xpt_print_device(struct cam_ed *device)
 3723 {
 3724 
 3725         if (device == NULL)
 3726                 printf("(nopath): ");
 3727         else {
 3728                 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
 3729                        device->sim->unit_number,
 3730                        device->sim->bus_id,
 3731                        device->target->target_id,
 3732                        (uintmax_t)device->lun_id);
 3733         }
 3734 }
 3735 
 3736 void
 3737 xpt_print(struct cam_path *path, const char *fmt, ...)
 3738 {
 3739         va_list ap;
 3740         xpt_print_path(path);
 3741         va_start(ap, fmt);
 3742         vprintf(fmt, ap);
 3743         va_end(ap);
 3744 }
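
/*
 * Example (sketch, not part of this file): xpt_print() prefixes a
 * console message with the "(periph:bus:target:lun): " tuple emitted
 * by xpt_print_path() above; 'retries' is a hypothetical variable.
 */
#if 0
        xpt_print(path, "retrying command, %d more tries remain\n", retries);
#endif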
 3745 
 3746 int
 3747 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 3748 {
 3749         struct sbuf sb;
 3750 
 3751         sbuf_new(&sb, str, str_len, 0);
 3752 
 3753         if (path == NULL)
 3754                 sbuf_printf(&sb, "(nopath): ");
 3755         else {
 3756                 if (path->periph != NULL)
 3757                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 3758                                     path->periph->unit_number);
 3759                 else
 3760                         sbuf_printf(&sb, "(noperiph:");
 3761 
 3762                 if (path->bus != NULL)
 3763                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 3764                                     path->bus->sim->unit_number,
 3765                                     path->bus->sim->bus_id);
 3766                 else
 3767                         sbuf_printf(&sb, "nobus:");
 3768 
 3769                 if (path->target != NULL)
 3770                         sbuf_printf(&sb, "%d:", path->target->target_id);
 3771                 else
 3772                         sbuf_printf(&sb, "X:");
 3773 
 3774                 if (path->device != NULL)
 3775                         sbuf_printf(&sb, "%jx): ",
 3776                             (uintmax_t)path->device->lun_id);
 3777                 else
 3778                         sbuf_printf(&sb, "X): ");
 3779         }
 3780         sbuf_finish(&sb);
 3781 
 3782         return(sbuf_len(&sb));
 3783 }
 3784 
 3785 path_id_t
 3786 xpt_path_path_id(struct cam_path *path)
 3787 {
 3788         return(path->bus->path_id);
 3789 }
 3790 
 3791 target_id_t
 3792 xpt_path_target_id(struct cam_path *path)
 3793 {
 3794         if (path->target != NULL)
 3795                 return (path->target->target_id);
 3796         else
 3797                 return (CAM_TARGET_WILDCARD);
 3798 }
 3799 
 3800 lun_id_t
 3801 xpt_path_lun_id(struct cam_path *path)
 3802 {
 3803         if (path->device != NULL)
 3804                 return (path->device->lun_id);
 3805         else
 3806                 return (CAM_LUN_WILDCARD);
 3807 }
 3808 
 3809 struct cam_sim *
 3810 xpt_path_sim(struct cam_path *path)
 3811 {
 3812 
 3813         return (path->bus->sim);
 3814 }
 3815 
 3816 struct cam_periph*
 3817 xpt_path_periph(struct cam_path *path)
 3818 {
 3819 
 3820         return (path->periph);
 3821 }
 3822 
 3823 /*
 3824  * Release a CAM control block for the caller.  Remit the cost of the structure
 3825  * to the device referenced by the path.  If this device had no 'credits'
 3826  * and peripheral drivers have registered async callbacks for this
 3827  * notification, call them now.
 3828  */
 3829 void
 3830 xpt_release_ccb(union ccb *free_ccb)
 3831 {
 3832         struct   cam_ed *device;
 3833         struct   cam_periph *periph;
 3834 
 3835         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 3836         xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
 3837         device = free_ccb->ccb_h.path->device;
 3838         periph = free_ccb->ccb_h.path->periph;
 3839 
 3840         xpt_free_ccb(free_ccb);
 3841         periph->periph_allocated--;
 3842         cam_ccbq_release_opening(&device->ccbq);
 3843         xpt_run_allocq(periph, 0);
 3844 }
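
/*
 * Example (sketch, not part of this file): a peripheral's done routine
 * runs with the path lock held (see the assertion above) and returns
 * the CCB, and with it the device opening, once it has harvested the
 * results.  foo_done is a hypothetical name.
 */
#if 0
static void
foo_done(struct cam_periph *periph, union ccb *done_ccb)
{

        /* ... inspect done_ccb->ccb_h.status, copy out data ... */
        xpt_release_ccb(done_ccb);      /* may restart the allocq */
}
#endif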
 3845 
 3846 /* Functions accessed by SIM drivers */
 3847 
 3848 static struct xpt_xport_ops xport_default_ops = {
 3849         .alloc_device = xpt_alloc_device_default,
 3850         .action = xpt_action_default,
 3851         .async = xpt_dev_async_default,
 3852 };
 3853 static struct xpt_xport xport_default = {
 3854         .xport = XPORT_UNKNOWN,
 3855         .name = "unknown",
 3856         .ops = &xport_default_ops,
 3857 };
 3858 
 3859 CAM_XPT_XPORT(xport_default);
 3860 
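/*
 * Example (sketch, not part of this file): a real transport registers
 * the same way xport_default does above; the foo_* names below are
 * hypothetical stand-ins for what a module like scsi_xpt.c provides.
 */
#if 0
static struct xpt_xport_ops foo_xport_ops = {
        .alloc_device = foo_alloc_device,
        .action = foo_action,
        .async = foo_dev_async,
};
static struct xpt_xport foo_xport = {
        .xport = XPORT_SPI,
        .name = "foo",
        .ops = &foo_xport_ops,
};
CAM_XPT_XPORT(foo_xport);
#endif
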
 3861 /*
 3862  * A sim structure, listing the SIM entry points and instance
 3863  * identification info, is passed to xpt_bus_register to hook the SIM
 3864  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 3865  * for this new bus, places it in the list of busses, and assigns
 3866  * it a path_id.  The path_id may be influenced by "hard wiring"
 3867  * information specified by the user.  Once interrupt services are
 3868  * available, the bus will be probed.
 3869  */
 3870 int32_t
 3871 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
 3872 {
 3873         struct cam_eb *new_bus;
 3874         struct cam_eb *old_bus;
 3875         struct ccb_pathinq cpi;
 3876         struct cam_path *path;
 3877         cam_status status;
 3878 
 3879         sim->bus_id = bus;
 3880         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 3881                                           M_CAMXPT, M_NOWAIT|M_ZERO);
 3882         if (new_bus == NULL) {
 3883                 /* Couldn't satisfy request */
 3884                 return (CAM_RESRC_UNAVAIL);
 3885         }
 3886 
 3887         mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
 3888         TAILQ_INIT(&new_bus->et_entries);
 3889         cam_sim_hold(sim);
 3890         new_bus->sim = sim;
 3891         timevalclear(&new_bus->last_reset);
 3892         new_bus->flags = 0;
 3893         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 3894         new_bus->generation = 0;
 3895 
 3896         xpt_lock_buses();
 3897         sim->path_id = new_bus->path_id =
 3898             xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 3899         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 3900         while (old_bus != NULL
 3901             && old_bus->path_id < new_bus->path_id)
 3902                 old_bus = TAILQ_NEXT(old_bus, links);
 3903         if (old_bus != NULL)
 3904                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 3905         else
 3906                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
 3907         xsoftc.bus_generation++;
 3908         xpt_unlock_buses();
 3909 
 3910         /*
 3911          * Set a default transport so that a PATH_INQ can be issued to
 3912          * the SIM.  This will then allow for probing and attaching of
 3913          * a more appropriate transport.
 3914          */
 3915         new_bus->xport = &xport_default;
 3916 
 3917         status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
 3918                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3919         if (status != CAM_REQ_CMP) {
 3920                 xpt_release_bus(new_bus);
 3921                 return (CAM_RESRC_UNAVAIL);
 3922         }
 3923 
 3924         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 3925         cpi.ccb_h.func_code = XPT_PATH_INQ;
 3926         xpt_action((union ccb *)&cpi);
 3927 
 3928         if (cpi.ccb_h.status == CAM_REQ_CMP) {
 3929                 struct xpt_xport **xpt;
 3930 
 3931                 SET_FOREACH(xpt, cam_xpt_xport_set) {
 3932                         if ((*xpt)->xport == cpi.transport) {
 3933                                 new_bus->xport = *xpt;
 3934                                 break;
 3935                         }
 3936                 }
 3937                 if (new_bus->xport == NULL) {
 3938                         xpt_print_path(path);
 3939                         printf("No transport found for %d\n", cpi.transport);
 3940                         xpt_release_bus(new_bus);
 3941                         xpt_free_path(path);
 3942                         return (CAM_RESRC_UNAVAIL);
 3943                 }
 3944         }
 3945 
 3946         /* Notify interested parties */
 3947         if (sim->path_id != CAM_XPT_PATH_ID) {
 3948 
 3949                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
 3950                 if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
 3951                         union   ccb *scan_ccb;
 3952 
 3953                         /* Initiate bus rescan. */
 3954                         scan_ccb = xpt_alloc_ccb_nowait();
 3955                         if (scan_ccb != NULL) {
 3956                                 scan_ccb->ccb_h.path = path;
 3957                                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
 3958                                 scan_ccb->crcn.flags = 0;
 3959                                 xpt_rescan(scan_ccb);
 3960                         } else {
 3961                                 xpt_print(path,
 3962                                           "Can't allocate CCB to scan bus\n");
 3963                                 xpt_free_path(path);
 3964                         }
 3965                 } else
 3966                         xpt_free_path(path);
 3967         } else
 3968                 xpt_free_path(path);
 3969         return (CAM_SUCCESS);
 3970 }
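
/*
 * Illustrative sketch (not part of this file): the usual registration
 * sequence in a SIM driver's attach routine, assuming a driver softc
 * "sc" with its own mutex, a device_t "dev", and hypothetical
 * "foo_action"/"foo_poll" entry points and queue depths.  Error
 * unwinding is abbreviated.
 */
#if 0
	struct cam_devq *devq;

	devq = cam_simq_alloc(/*max_sim_transactions*/256);
	if (devq == NULL)
		return (ENOMEM);
	sc->sim = cam_sim_alloc(foo_action, foo_poll, "foo", sc,
	    device_get_unit(dev), &sc->mtx, /*max_dev_transactions*/256,
	    /*max_tagged_dev_transactions*/255, devq);
	if (sc->sim == NULL) {
		cam_simq_free(devq);
		return (ENOMEM);
	}
	mtx_lock(&sc->mtx);
	if (xpt_bus_register(sc->sim, dev, /*bus*/0) != CAM_SUCCESS) {
		cam_sim_free(sc->sim, /*free_devq*/TRUE);
		mtx_unlock(&sc->mtx);
		return (ENXIO);
	}
	mtx_unlock(&sc->mtx);
#endif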
 3971 
 3972 int32_t
 3973 xpt_bus_deregister(path_id_t pathid)
 3974 {
 3975         struct cam_path bus_path;
 3976         cam_status status;
 3977 
 3978         status = xpt_compile_path(&bus_path, NULL, pathid,
 3979                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 3980         if (status != CAM_REQ_CMP)
 3981                 return (status);
 3982 
 3983         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 3984         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 3985 
 3986         /* Release the reference count held while registered. */
 3987         xpt_release_bus(bus_path.bus);
 3988         xpt_release_path(&bus_path);
 3989 
 3990         return (CAM_REQ_CMP);
 3991 }
 3992 
 3993 static path_id_t
 3994 xptnextfreepathid(void)
 3995 {
 3996         struct cam_eb *bus;
 3997         path_id_t pathid;
 3998         const char *strval;
 3999 
 4000         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
 4001         pathid = 0;
 4002         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4003 retry:
 4004         /* Find an unoccupied pathid */
 4005         while (bus != NULL && bus->path_id <= pathid) {
 4006                 if (bus->path_id == pathid)
 4007                         pathid++;
 4008                 bus = TAILQ_NEXT(bus, links);
 4009         }
 4010 
 4011         /*
 4012          * Ensure that this pathid is not reserved for
 4013          * a bus that may be registered in the future.
 4014          */
 4015         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 4016                 ++pathid;
 4017                 /* Start the search over */
 4018                 goto retry;
 4019         }
 4020         return (pathid);
 4021 }
 4022 
 4023 static path_id_t
 4024 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 4025 {
 4026         path_id_t pathid;
 4027         int i, dunit, val;
 4028         char buf[32];
 4029         const char *dname;
 4030 
 4031         pathid = CAM_XPT_PATH_ID;
 4032         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 4033         if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
 4034                 return (pathid);
 4035         i = 0;
 4036         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 4037                 if (strcmp(dname, "scbus")) {
 4038                         /* Avoid a bit of foot shooting. */
 4039                         continue;
 4040                 }
 4041                 if (dunit < 0)          /* unwired?! */
 4042                         continue;
 4043                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 4044                         if (sim_bus == val) {
 4045                                 pathid = dunit;
 4046                                 break;
 4047                         }
 4048                 } else if (sim_bus == 0) {
 4049                         /* Unspecified matches bus 0 */
 4050                         pathid = dunit;
 4051                         break;
 4052                 } else {
 4053                         printf("Ambiguous scbus configuration for %s%d "
 4054                                "bus %d, cannot wire down.  The kernel "
 4055                                "config entry for scbus%d should "
 4056                                "specify a controller bus.\n"
 4057                                "Scbus will be assigned dynamically.\n",
 4058                                sim_name, sim_unit, sim_bus, dunit);
 4059                         break;
 4060                 }
 4061         }
 4062 
 4063         if (pathid == CAM_XPT_PATH_ID)
 4064                 pathid = xptnextfreepathid();
 4065         return (pathid);
 4066 }
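
/*
 * Illustrative sketch (not part of this file): the "scbus" resources
 * consulted above come from the kernel configuration or loader hints.
 * For example, wiring SCSI bus 0 of a (hypothetical) ahc0 controller
 * to scbus2 could be expressed in /boot/device.hints as:
 *
 *	hint.scbus.2.at="ahc0"
 *	hint.scbus.2.bus="0"
 *
 * With that wiring in place, xptpathid() returns 2 for that SIM, and
 * xptnextfreepathid() skips 2 when handing out dynamic path IDs.
 */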
 4067 
 4068 static const char *
 4069 xpt_async_string(u_int32_t async_code)
 4070 {
 4071 
 4072         switch (async_code) {
 4073         case AC_BUS_RESET: return ("AC_BUS_RESET");
 4074         case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
 4075         case AC_SCSI_AEN: return ("AC_SCSI_AEN");
 4076         case AC_SENT_BDR: return ("AC_SENT_BDR");
 4077         case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
 4078         case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
 4079         case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
 4080         case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
 4081         case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
 4082         case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
 4083         case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
 4084         case AC_CONTRACT: return ("AC_CONTRACT");
 4085         case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
 4086         case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
 4087         }
 4088         return ("AC_UNKNOWN");
 4089 }
 4090 
 4091 static int
 4092 xpt_async_size(u_int32_t async_code)
 4093 {
 4094 
 4095         switch (async_code) {
 4096         case AC_BUS_RESET: return (0);
 4097         case AC_UNSOL_RESEL: return (0);
 4098         case AC_SCSI_AEN: return (0);
 4099         case AC_SENT_BDR: return (0);
 4100         case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
 4101         case AC_PATH_DEREGISTERED: return (0);
 4102         case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
 4103         case AC_LOST_DEVICE: return (0);
 4104         case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
 4105         case AC_INQ_CHANGED: return (0);
 4106         case AC_GETDEV_CHANGED: return (0);
 4107         case AC_CONTRACT: return (sizeof(struct ac_contract));
 4108         case AC_ADVINFO_CHANGED: return (-1);
 4109         case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
 4110         }
 4111         return (0);
 4112 }
 4113 
 4114 static int
 4115 xpt_async_process_dev(struct cam_ed *device, void *arg)
 4116 {
 4117         union ccb *ccb = arg;
 4118         struct cam_path *path = ccb->ccb_h.path;
 4119         void *async_arg = ccb->casync.async_arg_ptr;
 4120         u_int32_t async_code = ccb->casync.async_code;
 4121         int relock;
 4122 
 4123         if (path->device != device
 4124          && path->device->lun_id != CAM_LUN_WILDCARD
 4125          && device->lun_id != CAM_LUN_WILDCARD)
 4126                 return (1);
 4127 
 4128         /*
 4129          * The async callback could free the device.
 4130          * If it is a broadcast async, it doesn't hold a
 4131          * device reference, so take our own.
 4132          */
 4133         xpt_acquire_device(device);
 4134 
 4135         /*
 4136          * If an async for a specific device is to be delivered
 4137          * to a wildcard client, take the specific device's lock.
 4138          * XXX: We may need a way for the client to specify it.
 4139          */
 4140         if ((device->lun_id == CAM_LUN_WILDCARD &&
 4141              path->device->lun_id != CAM_LUN_WILDCARD) ||
 4142             (device->target->target_id == CAM_TARGET_WILDCARD &&
 4143              path->target->target_id != CAM_TARGET_WILDCARD) ||
 4144             (device->target->bus->path_id == CAM_BUS_WILDCARD &&
 4145              path->target->bus->path_id != CAM_BUS_WILDCARD)) {
 4146                 mtx_unlock(&device->device_mtx);
 4147                 xpt_path_lock(path);
 4148                 relock = 1;
 4149         } else
 4150                 relock = 0;
 4151 
 4152         (*(device->target->bus->xport->ops->async))(async_code,
 4153             device->target->bus, device->target, device, async_arg);
 4154         xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
 4155 
 4156         if (relock) {
 4157                 xpt_path_unlock(path);
 4158                 mtx_lock(&device->device_mtx);
 4159         }
 4160         xpt_release_device(device);
 4161         return (1);
 4162 }
 4163 
 4164 static int
 4165 xpt_async_process_tgt(struct cam_et *target, void *arg)
 4166 {
 4167         union ccb *ccb = arg;
 4168         struct cam_path *path = ccb->ccb_h.path;
 4169 
 4170         if (path->target != target
 4171          && path->target->target_id != CAM_TARGET_WILDCARD
 4172          && target->target_id != CAM_TARGET_WILDCARD)
 4173                 return (1);
 4174 
 4175         if (ccb->casync.async_code == AC_SENT_BDR) {
 4176                 /* Update our notion of when the last reset occurred */
 4177                 microtime(&target->last_reset);
 4178         }
 4179 
 4180         return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
 4181 }
 4182 
 4183 static void
 4184 xpt_async_process(struct cam_periph *periph, union ccb *ccb)
 4185 {
 4186         struct cam_eb *bus;
 4187         struct cam_path *path;
 4188         void *async_arg;
 4189         u_int32_t async_code;
 4190 
 4191         path = ccb->ccb_h.path;
 4192         async_code = ccb->casync.async_code;
 4193         async_arg = ccb->casync.async_arg_ptr;
 4194         CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
 4195             ("xpt_async(%s)\n", xpt_async_string(async_code)));
 4196         bus = path->bus;
 4197 
 4198         if (async_code == AC_BUS_RESET) {
 4199                 /* Update our notion of when the last reset occurred */
 4200                 microtime(&bus->last_reset);
 4201         }
 4202 
 4203         xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
 4204 
 4205         /*
 4206          * If this wasn't a fully wildcarded async, tell all
 4207          * clients that want all async events.
 4208          */
 4209         if (bus != xpt_periph->path->bus) {
 4210                 xpt_path_lock(xpt_periph->path);
 4211                 xpt_async_process_dev(xpt_periph->path->device, ccb);
 4212                 xpt_path_unlock(xpt_periph->path);
 4213         }
 4214 
 4215         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
 4216                 xpt_release_devq(path, 1, TRUE);
 4217         else
 4218                 xpt_release_simq(path->bus->sim, TRUE);
 4219         if (ccb->casync.async_arg_size > 0)
 4220                 free(async_arg, M_CAMXPT);
 4221         xpt_free_path(path);
 4222         xpt_free_ccb(ccb);
 4223 }
 4224 
 4225 static void
 4226 xpt_async_bcast(struct async_list *async_head,
 4227                 u_int32_t async_code,
 4228                 struct cam_path *path, void *async_arg)
 4229 {
 4230         struct async_node *cur_entry;
 4231         struct mtx *mtx;
 4232 
 4233         cur_entry = SLIST_FIRST(async_head);
 4234         while (cur_entry != NULL) {
 4235                 struct async_node *next_entry;
 4236                 /*
 4237                  * Grab the next list entry before we call the current
 4238                  * entry's callback.  This is because the callback function
 4239                  * can delete its async callback entry.
 4240                  */
 4241                 next_entry = SLIST_NEXT(cur_entry, links);
 4242                 if ((cur_entry->event_enable & async_code) != 0) {
 4243                         mtx = cur_entry->event_lock ?
 4244                             path->device->sim->mtx : NULL;
 4245                         if (mtx)
 4246                                 mtx_lock(mtx);
 4247                         cur_entry->callback(cur_entry->callback_arg,
 4248                                             async_code, path,
 4249                                             async_arg);
 4250                         if (mtx)
 4251                                 mtx_unlock(mtx);
 4252                 }
 4253                 cur_entry = next_entry;
 4254         }
 4255 }
 4256 
 4257 void
 4258 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 4259 {
 4260         union ccb *ccb;
 4261         int size;
 4262 
 4263         ccb = xpt_alloc_ccb_nowait();
 4264         if (ccb == NULL) {
 4265                 xpt_print(path, "Can't allocate CCB to send %s\n",
 4266                     xpt_async_string(async_code));
 4267                 return;
 4268         }
 4269 
 4270         if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
 4271                 xpt_print(path, "Can't allocate path to send %s\n",
 4272                     xpt_async_string(async_code));
 4273                 xpt_free_ccb(ccb);
 4274                 return;
 4275         }
 4276         ccb->ccb_h.path->periph = NULL;
 4277         ccb->ccb_h.func_code = XPT_ASYNC;
 4278         ccb->ccb_h.cbfcnp = xpt_async_process;
 4279         ccb->ccb_h.flags |= CAM_UNLOCKED;
 4280         ccb->casync.async_code = async_code;
 4281         ccb->casync.async_arg_size = 0;
 4282         size = xpt_async_size(async_code);
 4283         CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
 4284             ("xpt_async: func %#x %s async_code %d %s\n",
 4285                 ccb->ccb_h.func_code,
 4286                 xpt_action_name(ccb->ccb_h.func_code),
 4287                 async_code,
 4288                 xpt_async_string(async_code)));
 4289         if (size > 0 && async_arg != NULL) {
 4290                 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
 4291                 if (ccb->casync.async_arg_ptr == NULL) {
 4292                         xpt_print(path, "Can't allocate argument to send %s\n",
 4293                             xpt_async_string(async_code));
 4294                         xpt_free_path(ccb->ccb_h.path);
 4295                         xpt_free_ccb(ccb);
 4296                         return;
 4297                 }
 4298                 memcpy(ccb->casync.async_arg_ptr, async_arg, size);
 4299                 ccb->casync.async_arg_size = size;
 4300         } else if (size < 0) {
 4301                 ccb->casync.async_arg_ptr = async_arg;
 4302                 ccb->casync.async_arg_size = size;
 4303         }
 4304         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
 4305                 xpt_freeze_devq(path, 1);
 4306         else
 4307                 xpt_freeze_simq(path->bus->sim, 1);
 4308         xpt_done(ccb);
 4309 }
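
/*
 * Illustrative sketch (not part of this file): a SIM that observes a
 * bus reset in its interrupt handler reports it through this interface
 * on a previously created wildcard path for the bus ("sc->path" is a
 * hypothetical name for such a path):
 */
#if 0
	xpt_async(AC_BUS_RESET, sc->path, NULL);
#endif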
 4310 
 4311 static void
 4312 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
 4313                       struct cam_et *target, struct cam_ed *device,
 4314                       void *async_arg)
 4315 {
 4316 
 4317         /*
 4318          * We only need to handle events for real devices.
 4319          */
 4320         if (target->target_id == CAM_TARGET_WILDCARD
 4321          || device->lun_id == CAM_LUN_WILDCARD)
 4322                 return;
 4323 
 4324         printf("%s called\n", __func__);
 4325 }
 4326 
 4327 static uint32_t
 4328 xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
 4329 {
 4330         struct cam_devq *devq;
 4331         uint32_t freeze;
 4332 
 4333         devq = dev->sim->devq;
 4334         mtx_assert(&devq->send_mtx, MA_OWNED);
 4335         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
 4336             ("xpt_freeze_devq_device(%d) %u->%u\n", count,
 4337             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
 4338         freeze = (dev->ccbq.queue.qfrozen_cnt += count);
 4339         /* Remove frozen device from sendq. */
 4340         if (device_is_queued(dev))
 4341                 camq_remove(&devq->send_queue, dev->devq_entry.index);
 4342         return (freeze);
 4343 }
 4344 
 4345 u_int32_t
 4346 xpt_freeze_devq(struct cam_path *path, u_int count)
 4347 {
 4348         struct cam_ed   *dev = path->device;
 4349         struct cam_devq *devq;
 4350         uint32_t         freeze;
 4351 
 4352         devq = dev->sim->devq;
 4353         mtx_lock(&devq->send_mtx);
 4354         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
 4355         freeze = xpt_freeze_devq_device(dev, count);
 4356         mtx_unlock(&devq->send_mtx);
 4357         return (freeze);
 4358 }
 4359 
 4360 u_int32_t
 4361 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4362 {
 4363         struct cam_devq *devq;
 4364         uint32_t         freeze;
 4365 
 4366         devq = sim->devq;
 4367         mtx_lock(&devq->send_mtx);
 4368         freeze = (devq->send_queue.qfrozen_cnt += count);
 4369         mtx_unlock(&devq->send_mtx);
 4370         return (freeze);
 4371 }
 4372 
 4373 static void
 4374 xpt_release_devq_timeout(void *arg)
 4375 {
 4376         struct cam_ed *dev;
 4377         struct cam_devq *devq;
 4378 
 4379         dev = (struct cam_ed *)arg;
 4380         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
 4381         devq = dev->sim->devq;
 4382         mtx_assert(&devq->send_mtx, MA_OWNED);
 4383         if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
 4384                 xpt_run_devq(devq);
 4385 }
 4386 
 4387 void
 4388 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4389 {
 4390         struct cam_ed *dev;
 4391         struct cam_devq *devq;
 4392 
 4393         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
 4394             count, run_queue));
 4395         dev = path->device;
 4396         devq = dev->sim->devq;
 4397         mtx_lock(&devq->send_mtx);
 4398         if (xpt_release_devq_device(dev, count, run_queue))
 4399                 xpt_run_devq(dev->sim->devq);
 4400         mtx_unlock(&devq->send_mtx);
 4401 }
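
/*
 * Illustrative sketch (not part of this file): freeze and release calls
 * pair up.  Recovery code commonly freezes a device queue, performs its
 * recovery action, and then releases the queue so held requests run:
 */
#if 0
	xpt_freeze_devq(path, /*count*/1);
	/* ... issue and wait for a recovery command ... */
	xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
#endif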
 4402 
 4403 static int
 4404 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
 4405 {
 4406 
 4407         mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
 4408         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
 4409             ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
 4410             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
 4411         if (count > dev->ccbq.queue.qfrozen_cnt) {
 4412 #ifdef INVARIANTS
 4413                 printf("xpt_release_devq(): requested %u > present %u\n",
 4414                     count, dev->ccbq.queue.qfrozen_cnt);
 4415 #endif
 4416                 count = dev->ccbq.queue.qfrozen_cnt;
 4417         }
 4418         dev->ccbq.queue.qfrozen_cnt -= count;
 4419         if (dev->ccbq.queue.qfrozen_cnt == 0) {
 4420                 /*
 4421                  * No longer need to wait for a successful
 4422                  * command completion.
 4423                  */
 4424                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4425                 /*
 4426                  * Remove any timeouts that might be scheduled
 4427                  * to release this queue.
 4428                  */
 4429                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4430                         callout_stop(&dev->callout);
 4431                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4432                 }
 4433                 /*
 4434                  * Now that we are unfrozen schedule the
 4435                  * device so any pending transactions are
 4436                  * run.
 4437                  */
 4438                 xpt_schedule_devq(dev->sim->devq, dev);
 4439         } else
 4440                 run_queue = 0;
 4441         return (run_queue);
 4442 }
 4443 
 4444 void
 4445 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4446 {
 4447         struct cam_devq *devq;
 4448 
 4449         devq = sim->devq;
 4450         mtx_lock(&devq->send_mtx);
 4451         if (devq->send_queue.qfrozen_cnt <= 0) {
 4452 #ifdef INVARIANTS
 4453                 printf("xpt_release_simq: requested 1 > present %u\n",
 4454                     devq->send_queue.qfrozen_cnt);
 4455 #endif
 4456         } else
 4457                 devq->send_queue.qfrozen_cnt--;
 4458         if (devq->send_queue.qfrozen_cnt == 0) {
 4459                 /*
 4460                  * If there is a timeout scheduled to release this
 4461                  * sim queue, remove it.  The queue frozen count is
 4462                  * already at 0.
 4463                  */
 4464                 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4465                         callout_stop(&sim->callout);
 4466                         sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4467                 }
 4468                 if (run_queue) {
 4469                         /*
 4470                          * Now that we are unfrozen run the send queue.
 4471                          */
 4472                         xpt_run_devq(sim->devq);
 4473                 }
 4474         }
 4475         mtx_unlock(&devq->send_mtx);
 4476 }
 4477 
 4478 /*
 4479  * XXX Appears to be unused.
 4480  */
 4481 static void
 4482 xpt_release_simq_timeout(void *arg)
 4483 {
 4484         struct cam_sim *sim;
 4485 
 4486         sim = (struct cam_sim *)arg;
 4487         xpt_release_simq(sim, /* run_queue */ TRUE);
 4488 }
 4489 
 4490 void
 4491 xpt_done(union ccb *done_ccb)
 4492 {
 4493         struct cam_doneq *queue;
 4494         int     run, hash;
 4495 
 4496         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 4497             ("xpt_done: func %#x %s status %#x\n",
 4498                 done_ccb->ccb_h.func_code,
 4499                 xpt_action_name(done_ccb->ccb_h.func_code),
 4500                 done_ccb->ccb_h.status));
 4501         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
 4502                 return;
 4503 
 4504         /* Store the time the ccb was in the sim */
 4505         done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data;
 4506         hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
 4507             done_ccb->ccb_h.target_lun) % cam_num_doneqs;
 4508         queue = &cam_doneqs[hash];
 4509         mtx_lock(&queue->cam_doneq_mtx);
 4510         run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
 4511         STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
 4512         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4513         mtx_unlock(&queue->cam_doneq_mtx);
 4514         if (run)
 4515                 wakeup(&queue->cam_doneq);
 4516 }
 4517 
 4518 void
 4519 xpt_done_direct(union ccb *done_ccb)
 4520 {
 4521 
 4522         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 4523             ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status));
 4524         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
 4525                 return;
 4526 
 4527         /* Store the time the ccb was in the sim */
 4528         done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data;
 4529         xpt_done_process(&done_ccb->ccb_h);
 4530 }
 4531 
 4532 union ccb *
 4533 xpt_alloc_ccb(void)
 4534 {
 4535         union ccb *new_ccb;
 4536 
 4537         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
 4538         return (new_ccb);
 4539 }
 4540 
 4541 union ccb *
 4542 xpt_alloc_ccb_nowait(void)
 4543 {
 4544         union ccb *new_ccb;
 4545 
 4546         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
 4547         return (new_ccb);
 4548 }
 4549 
 4550 void
 4551 xpt_free_ccb(union ccb *free_ccb)
 4552 {
 4553         free(free_ccb, M_CAMCCB);
 4554 }
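
/*
 * Illustrative sketch (not part of this file): heap CCBs from
 * xpt_alloc_ccb()/xpt_free_ccb() are for requests that must outlive the
 * caller's stack frame (e.g. the rescan CCB in xpt_bus_register()
 * above).  Immediate requests are usually built on the stack instead:
 */
#if 0
	struct ccb_pathinq cpi;

	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status == CAM_REQ_CMP)
		printf("transport type %d\n", cpi.transport);
#endif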
 4555 
 4556 
 4557 
 4558 /* Private XPT functions */
 4559 
 4560 /*
 4561  * Get a CAM control block for the caller. Charge the structure to the device
 4562  * referenced by the path.  If we don't have sufficient resources to allocate
 4563  * more ccbs, we return NULL.
 4564  */
 4565 static union ccb *
 4566 xpt_get_ccb_nowait(struct cam_periph *periph)
 4567 {
 4568         union ccb *new_ccb;
 4569 
 4570         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
 4571         if (new_ccb == NULL)
 4572                 return (NULL);
 4573         periph->periph_allocated++;
 4574         cam_ccbq_take_opening(&periph->path->device->ccbq);
 4575         return (new_ccb);
 4576 }
 4577 
 4578 static union ccb *
 4579 xpt_get_ccb(struct cam_periph *periph)
 4580 {
 4581         union ccb *new_ccb;
 4582 
 4583         cam_periph_unlock(periph);
 4584         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
 4585         cam_periph_lock(periph);
 4586         periph->periph_allocated++;
 4587         cam_ccbq_take_opening(&periph->path->device->ccbq);
 4588         return (new_ccb);
 4589 }
 4590 
 4591 union ccb *
 4592 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
 4593 {
 4594         struct ccb_hdr *ccb_h;
 4595 
 4596         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
 4597         cam_periph_assert(periph, MA_OWNED);
 4598         while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
 4599             ccb_h->pinfo.priority != priority) {
 4600                 if (priority < periph->immediate_priority) {
 4601                         periph->immediate_priority = priority;
 4602                         xpt_run_allocq(periph, 0);
 4603                 } else
 4604                         cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
 4605                             "cgticb", 0);
 4606         }
 4607         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
 4608         return ((union ccb *)ccb_h);
 4609 }
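
/*
 * Illustrative sketch (not part of this file): a synchronous request as
 * seen from a peripheral driver.  The CCB obtained here is filled in,
 * run to completion, and returned through xpt_release_ccb() (compare
 * the completion-callback sketch after xpt_release_ccb() above):
 */
#if 0
	union ccb *ccb;
	int error;

	cam_periph_lock(periph);
	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
	/* ... fill in a request, e.g. with scsi_prevent() ... */
	error = cam_periph_runccb(ccb, NULL, /*cam_flags*/0,
	    /*sense_flags*/SF_RETRY_UA, /*devstat*/NULL);
	xpt_release_ccb(ccb);
	cam_periph_unlock(periph);
#endif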
 4610 
 4611 static void
 4612 xpt_acquire_bus(struct cam_eb *bus)
 4613 {
 4614 
 4615         xpt_lock_buses();
 4616         bus->refcount++;
 4617         xpt_unlock_buses();
 4618 }
 4619 
 4620 static void
 4621 xpt_release_bus(struct cam_eb *bus)
 4622 {
 4623 
 4624         xpt_lock_buses();
 4625         KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
 4626         if (--bus->refcount > 0) {
 4627                 xpt_unlock_buses();
 4628                 return;
 4629         }
 4630         TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
 4631         xsoftc.bus_generation++;
 4632         xpt_unlock_buses();
 4633         KASSERT(TAILQ_EMPTY(&bus->et_entries),
 4634             ("destroying bus, but target list is not empty"));
 4635         cam_sim_release(bus->sim);
 4636         mtx_destroy(&bus->eb_mtx);
 4637         free(bus, M_CAMXPT);
 4638 }
 4639 
 4640 static struct cam_et *
 4641 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4642 {
 4643         struct cam_et *cur_target, *target;
 4644 
 4645         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
 4646         mtx_assert(&bus->eb_mtx, MA_OWNED);
 4647         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
 4648                                          M_NOWAIT|M_ZERO);
 4649         if (target == NULL)
 4650                 return (NULL);
 4651 
 4652         TAILQ_INIT(&target->ed_entries);
 4653         target->bus = bus;
 4654         target->target_id = target_id;
 4655         target->refcount = 1;
 4656         target->generation = 0;
 4657         target->luns = NULL;
 4658         mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
 4659         timevalclear(&target->last_reset);
 4660         /*
 4661          * Hold a reference to our parent bus so it
 4662          * will not go away before we do.
 4663          */
 4664         bus->refcount++;
 4665 
 4666         /* Insertion sort into our bus's target list */
 4667         cur_target = TAILQ_FIRST(&bus->et_entries);
 4668         while (cur_target != NULL && cur_target->target_id < target_id)
 4669                 cur_target = TAILQ_NEXT(cur_target, links);
 4670         if (cur_target != NULL) {
 4671                 TAILQ_INSERT_BEFORE(cur_target, target, links);
 4672         } else {
 4673                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4674         }
 4675         bus->generation++;
 4676         return (target);
 4677 }
 4678 
 4679 static void
 4680 xpt_acquire_target(struct cam_et *target)
 4681 {
 4682         struct cam_eb *bus = target->bus;
 4683 
 4684         mtx_lock(&bus->eb_mtx);
 4685         target->refcount++;
 4686         mtx_unlock(&bus->eb_mtx);
 4687 }
 4688 
 4689 static void
 4690 xpt_release_target(struct cam_et *target)
 4691 {
 4692         struct cam_eb *bus = target->bus;
 4693 
 4694         mtx_lock(&bus->eb_mtx);
 4695         if (--target->refcount > 0) {
 4696                 mtx_unlock(&bus->eb_mtx);
 4697                 return;
 4698         }
 4699         TAILQ_REMOVE(&bus->et_entries, target, links);
 4700         bus->generation++;
 4701         mtx_unlock(&bus->eb_mtx);
 4702         KASSERT(TAILQ_EMPTY(&target->ed_entries),
 4703             ("destroying target, but device list is not empty"));
 4704         xpt_release_bus(bus);
 4705         mtx_destroy(&target->luns_mtx);
 4706         if (target->luns)
 4707                 free(target->luns, M_CAMXPT);
 4708         free(target, M_CAMXPT);
 4709 }
 4710 
 4711 static struct cam_ed *
 4712 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
 4713                          lun_id_t lun_id)
 4714 {
 4715         struct cam_ed *device;
 4716 
 4717         device = xpt_alloc_device(bus, target, lun_id);
 4718         if (device == NULL)
 4719                 return (NULL);
 4720 
 4721         device->mintags = 1;
 4722         device->maxtags = 1;
 4723         return (device);
 4724 }
 4725 
 4726 static void
 4727 xpt_destroy_device(void *context, int pending)
 4728 {
 4729         struct cam_ed   *device = context;
 4730 
 4731         mtx_lock(&device->device_mtx);
 4732         mtx_destroy(&device->device_mtx);
 4733         free(device, M_CAMDEV);
 4734 }
 4735 
 4736 struct cam_ed *
 4737 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4738 {
 4739         struct cam_ed   *cur_device, *device;
 4740         struct cam_devq *devq;
 4741         cam_status status;
 4742 
 4743         mtx_assert(&bus->eb_mtx, MA_OWNED);
 4744         /* Make space for us in the device queue on our bus */
 4745         devq = bus->sim->devq;
 4746         mtx_lock(&devq->send_mtx);
 4747         status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
 4748         mtx_unlock(&devq->send_mtx);
 4749         if (status != CAM_REQ_CMP)
 4750                 return (NULL);
 4751 
 4752         device = (struct cam_ed *)malloc(sizeof(*device),
 4753                                          M_CAMDEV, M_NOWAIT|M_ZERO);
 4754         if (device == NULL)
 4755                 return (NULL);
 4756 
 4757         cam_init_pinfo(&device->devq_entry);
 4758         device->target = target;
 4759         device->lun_id = lun_id;
 4760         device->sim = bus->sim;
 4761         if (cam_ccbq_init(&device->ccbq,
 4762                           bus->sim->max_dev_openings) != 0) {
 4763                 free(device, M_CAMDEV);
 4764                 return (NULL);
 4765         }
 4766         SLIST_INIT(&device->asyncs);
 4767         SLIST_INIT(&device->periphs);
 4768         device->generation = 0;
 4769         device->flags = CAM_DEV_UNCONFIGURED;
 4770         device->tag_delay_count = 0;
 4771         device->tag_saved_openings = 0;
 4772         device->refcount = 1;
 4773         mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
 4774         callout_init_mtx(&device->callout, &devq->send_mtx, 0);
 4775         TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
 4776         /*
 4777          * Hold a reference to our parent bus so it
 4778          * will not go away before we do.
 4779          */
 4780         target->refcount++;
 4781 
 4782         cur_device = TAILQ_FIRST(&target->ed_entries);
 4783         while (cur_device != NULL && cur_device->lun_id < lun_id)
 4784                 cur_device = TAILQ_NEXT(cur_device, links);
 4785         if (cur_device != NULL)
 4786                 TAILQ_INSERT_BEFORE(cur_device, device, links);
 4787         else
 4788                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 4789         target->generation++;
 4790         return (device);
 4791 }
 4792 
 4793 void
 4794 xpt_acquire_device(struct cam_ed *device)
 4795 {
 4796         struct cam_eb *bus = device->target->bus;
 4797 
 4798         mtx_lock(&bus->eb_mtx);
 4799         device->refcount++;
 4800         mtx_unlock(&bus->eb_mtx);
 4801 }
 4802 
 4803 void
 4804 xpt_release_device(struct cam_ed *device)
 4805 {
 4806         struct cam_eb *bus = device->target->bus;
 4807         struct cam_devq *devq;
 4808 
 4809         mtx_lock(&bus->eb_mtx);
 4810         if (--device->refcount > 0) {
 4811                 mtx_unlock(&bus->eb_mtx);
 4812                 return;
 4813         }
 4814 
 4815         TAILQ_REMOVE(&device->target->ed_entries, device, links);
 4816         device->target->generation++;
 4817         mtx_unlock(&bus->eb_mtx);
 4818 
 4819         /* Release our slot in the devq */
 4820         devq = bus->sim->devq;
 4821         mtx_lock(&devq->send_mtx);
 4822         cam_devq_resize(devq, devq->send_queue.array_size - 1);
 4823         mtx_unlock(&devq->send_mtx);
 4824 
 4825         KASSERT(SLIST_EMPTY(&device->periphs),
 4826             ("destroying device, but periphs list is not empty"));
 4827         KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
 4828             ("destroying device while still queued for ccbs"));
 4829 
 4830         if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 4831                 callout_stop(&device->callout);
 4832 
 4833         xpt_release_target(device->target);
 4834 
 4835         cam_ccbq_fini(&device->ccbq);
 4836         /*
 4837          * Free allocated memory.  free(9) does nothing if the
 4838          * supplied pointer is NULL, so it is safe to call without
 4839          * checking.
 4840          */
 4841         free(device->supported_vpds, M_CAMXPT);
 4842         free(device->device_id, M_CAMXPT);
 4843         free(device->ext_inq, M_CAMXPT);
 4844         free(device->physpath, M_CAMXPT);
 4845         free(device->rcap_buf, M_CAMXPT);
 4846         free(device->serial_num, M_CAMXPT);
 4847         taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
 4848 }
 4849 
 4850 u_int32_t
 4851 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 4852 {
 4853         int     result;
 4854         struct  cam_ed *dev;
 4855 
 4856         dev = path->device;
 4857         mtx_lock(&dev->sim->devq->send_mtx);
 4858         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 4859         mtx_unlock(&dev->sim->devq->send_mtx);
 4860         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 4861          || (dev->inq_flags & SID_CmdQue) != 0)
 4862                 dev->tag_saved_openings = newopenings;
 4863         return (result);
 4864 }
 4865 
 4866 static struct cam_eb *
 4867 xpt_find_bus(path_id_t path_id)
 4868 {
 4869         struct cam_eb *bus;
 4870 
 4871         xpt_lock_buses();
 4872         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4873              bus != NULL;
 4874              bus = TAILQ_NEXT(bus, links)) {
 4875                 if (bus->path_id == path_id) {
 4876                         bus->refcount++;
 4877                         break;
 4878                 }
 4879         }
 4880         xpt_unlock_buses();
 4881         return (bus);
 4882 }
 4883 
 4884 static struct cam_et *
 4885 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 4886 {
 4887         struct cam_et *target;
 4888 
 4889         mtx_assert(&bus->eb_mtx, MA_OWNED);
 4890         for (target = TAILQ_FIRST(&bus->et_entries);
 4891              target != NULL;
 4892              target = TAILQ_NEXT(target, links)) {
 4893                 if (target->target_id == target_id) {
 4894                         target->refcount++;
 4895                         break;
 4896                 }
 4897         }
 4898         return (target);
 4899 }
 4900 
 4901 static struct cam_ed *
 4902 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 4903 {
 4904         struct cam_ed *device;
 4905 
 4906         mtx_assert(&target->bus->eb_mtx, MA_OWNED);
 4907         for (device = TAILQ_FIRST(&target->ed_entries);
 4908              device != NULL;
 4909              device = TAILQ_NEXT(device, links)) {
 4910                 if (device->lun_id == lun_id) {
 4911                         device->refcount++;
 4912                         break;
 4913                 }
 4914         }
 4915         return (device);
 4916 }
 4917 
 4918 void
 4919 xpt_start_tags(struct cam_path *path)
 4920 {
 4921         struct ccb_relsim crs;
 4922         struct cam_ed *device;
 4923         struct cam_sim *sim;
 4924         int    newopenings;
 4925 
 4926         device = path->device;
 4927         sim = path->bus->sim;
 4928         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4929         xpt_freeze_devq(path, /*count*/1);
 4930         device->inq_flags |= SID_CmdQue;
 4931         if (device->tag_saved_openings != 0)
 4932                 newopenings = device->tag_saved_openings;
 4933         else
 4934                 newopenings = min(device->maxtags,
 4935                                   sim->max_tagged_dev_openings);
 4936         xpt_dev_ccbq_resize(path, newopenings);
 4937         xpt_async(AC_GETDEV_CHANGED, path, NULL);
 4938         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4939         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4940         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4941         crs.openings
 4942             = crs.release_timeout
 4943             = crs.qfrozen_cnt
 4944             = 0;
 4945         xpt_action((union ccb *)&crs);
 4946 }
 4947 
 4948 void
 4949 xpt_stop_tags(struct cam_path *path)
 4950 {
 4951         struct ccb_relsim crs;
 4952         struct cam_ed *device;
 4953         struct cam_sim *sim;
 4954 
 4955         device = path->device;
 4956         sim = path->bus->sim;
 4957         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 4958         device->tag_delay_count = 0;
 4959         xpt_freeze_devq(path, /*count*/1);
 4960         device->inq_flags &= ~SID_CmdQue;
 4961         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
 4962         xpt_async(AC_GETDEV_CHANGED, path, NULL);
 4963         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 4964         crs.ccb_h.func_code = XPT_REL_SIMQ;
 4965         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 4966         crs.openings
 4967             = crs.release_timeout
 4968             = crs.qfrozen_cnt
 4969             = 0;
 4970         xpt_action((union ccb *)&crs);
 4971 }
 4972 
 4973 static void
 4974 xpt_boot_delay(void *arg)
 4975 {
 4976 
 4977         xpt_release_boot();
 4978 }
 4979 
 4980 static void
 4981 xpt_config(void *arg)
 4982 {
 4983         /*
 4984          * Now that interrupts are enabled, go find our devices
 4985          */
 4986         if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
 4987                 printf("xpt_config: failed to create taskqueue thread.\n");
 4988 
 4989         /* Setup debugging path */
 4990         if (cam_dflags != CAM_DEBUG_NONE) {
 4991                 if (xpt_create_path(&cam_dpath, NULL,
 4992                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 4993                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 4994                         printf("xpt_config: xpt_create_path() failed for debug"
 4995                                " target %d:%d:%d, debugging disabled\n",
 4996                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 4997                         cam_dflags = CAM_DEBUG_NONE;
 4998                 }
 4999         } else
 5000                 cam_dpath = NULL;
 5001 
 5002         periphdriver_init(1);
 5003         xpt_hold_boot();
 5004         callout_init(&xsoftc.boot_callout, 1);
 5005         callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0,
 5006             xpt_boot_delay, NULL, 0);
 5007         /* Fire up rescan thread. */
 5008         if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
 5009             "cam", "scanner")) {
 5010                 printf("xpt_config: failed to create rescan thread.\n");
 5011         }
 5012 }
 5013 
 5014 void
 5015 xpt_hold_boot(void)
 5016 {
 5017         xpt_lock_buses();
 5018         xsoftc.buses_to_config++;
 5019         xpt_unlock_buses();
 5020 }
 5021 
 5022 void
 5023 xpt_release_boot(void)
 5024 {
 5025         xpt_lock_buses();
 5026         xsoftc.buses_to_config--;
 5027         if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
 5028                 struct  xpt_task *task;
 5029 
 5030                 xsoftc.buses_config_done = 1;
 5031                 xpt_unlock_buses();
 5032                 /* Call manually because we don't have any busses */
 5033                 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
 5034                 if (task != NULL) {
 5035                         TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
 5036                         taskqueue_enqueue(taskqueue_thread, &task->task);
 5037                 }
 5038         } else
 5039                 xpt_unlock_buses();
 5040 }
 5041 
 5042 /*
 5043  * If the given device only has one peripheral attached to it, and if that
 5044  * peripheral is the passthrough driver, announce it.  This ensures that the
 5045  * user sees some sort of announcement for every peripheral in their system.
 5046  */
 5047 static int
 5048 xptpassannouncefunc(struct cam_ed *device, void *arg)
 5049 {
 5050         struct cam_periph *periph;
 5051         int i;
 5052 
 5053         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 5054              periph = SLIST_NEXT(periph, periph_links), i++);
 5055 
 5056         periph = SLIST_FIRST(&device->periphs);
 5057         if ((i == 1)
 5058          && (strncmp(periph->periph_name, "pass", 4) == 0))
 5059                 xpt_announce_periph(periph, NULL);
 5060 
 5061         return(1);
 5062 }
 5063 
 5064 static void
 5065 xpt_finishconfig_task(void *context, int pending)
 5066 {
 5067 
 5068         periphdriver_init(2);
 5069         /*
 5070          * Check for devices with no "standard" peripheral driver
 5071          * attached.  For any devices like that, announce the
 5072          * passthrough driver so the user will see something.
 5073          */
 5074         if (!bootverbose)
 5075                 xpt_for_all_devices(xptpassannouncefunc, NULL);
 5076 
 5077         /* Release our hook so that the boot can continue. */
 5078         config_intrhook_disestablish(xsoftc.xpt_config_hook);
 5079         free(xsoftc.xpt_config_hook, M_CAMXPT);
 5080         xsoftc.xpt_config_hook = NULL;
 5081 
 5082         free(context, M_CAMXPT);
 5083 }
 5084 
 5085 cam_status
 5086 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
 5087                    struct cam_path *path)
 5088 {
 5089         struct ccb_setasync csa;
 5090         cam_status status;
 5091         int xptpath = 0;
 5092 
 5093         if (path == NULL) {
 5094                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
 5095                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 5096                 if (status != CAM_REQ_CMP)
 5097                         return (status);
 5098                 xpt_path_lock(path);
 5099                 xptpath = 1;
 5100         }
 5101 
 5102         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 5103         csa.ccb_h.func_code = XPT_SASYNC_CB;
 5104         csa.event_enable = event;
 5105         csa.callback = cbfunc;
 5106         csa.callback_arg = cbarg;
 5107         xpt_action((union ccb *)&csa);
 5108         status = csa.ccb_h.status;
 5109 
 5110         CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE,
 5111             ("xpt_register_async: func %p\n", cbfunc));
 5112 
 5113         if (xptpath) {
 5114                 xpt_path_unlock(path);
 5115                 xpt_free_path(path);
 5116         }
 5117 
 5118         if ((status == CAM_REQ_CMP) &&
 5119             (csa.event_enable & AC_FOUND_DEVICE)) {
 5120                 /*
 5121                  * Get this peripheral up to date with all
 5122                  * the currently existing devices.
 5123                  */
 5124                 xpt_for_all_devices(xptsetasyncfunc, &csa);
 5125         }
 5126         if ((status == CAM_REQ_CMP) &&
 5127             (csa.event_enable & AC_PATH_REGISTERED)) {
 5128                 /*
 5129                  * Get this peripheral up to date with all
 5130                  * the currently existing busses.
 5131                  */
 5132                 xpt_for_all_busses(xptsetasyncbusfunc, &csa);
 5133         }
 5134 
 5135         return (status);
 5136 }
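
/*
 * Illustrative sketch (not part of this file): peripheral drivers hook
 * device-arrival notifications from their module init path; "fooasync"
 * is a hypothetical ac_callback_t with the signature invoked by
 * xpt_async_bcast() above.
 */
#if 0
	status = xpt_register_async(AC_FOUND_DEVICE, fooasync,
	    /*cbarg*/NULL, /*path*/NULL);
	if (status != CAM_REQ_CMP)
		printf("foo: Failed to attach async callback, status %#x\n",
		    status);
#endif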
 5137 
 5138 static void
 5139 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 5140 {
 5141         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 5142 
 5143         switch (work_ccb->ccb_h.func_code) {
 5144         /* Common cases first */
 5145         case XPT_PATH_INQ:              /* Path routing inquiry */
 5146         {
 5147                 struct ccb_pathinq *cpi;
 5148 
 5149                 cpi = &work_ccb->cpi;
 5150                 cpi->version_num = 1; /* XXX??? */
 5151                 cpi->hba_inquiry = 0;
 5152                 cpi->target_sprt = 0;
 5153                 cpi->hba_misc = 0;
 5154                 cpi->hba_eng_cnt = 0;
 5155                 cpi->max_target = 0;
 5156                 cpi->max_lun = 0;
 5157                 cpi->initiator_id = 0;
 5158                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 5159                 strlcpy(cpi->hba_vid, "", HBA_IDLEN);
 5160                 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 5161                 cpi->unit_number = sim->unit_number;
 5162                 cpi->bus_id = sim->bus_id;
 5163                 cpi->base_transfer_speed = 0;
 5164                 cpi->protocol = PROTO_UNSPECIFIED;
 5165                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
 5166                 cpi->transport = XPORT_UNSPECIFIED;
 5167                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
 5168                 cpi->ccb_h.status = CAM_REQ_CMP;
 5169                 xpt_done(work_ccb);
 5170                 break;
 5171         }
 5172         default:
 5173                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 5174                 xpt_done(work_ccb);
 5175                 break;
 5176         }
 5177 }
 5178 
 5179 /*
 5180  * The xpt as a "controller" has no interrupt sources, so polling
 5181  * is a no-op.
 5182  */
 5183 static void
 5184 xptpoll(struct cam_sim *sim)
 5185 {
 5186 }
 5187 
 5188 void
 5189 xpt_lock_buses(void)
 5190 {
 5191         mtx_lock(&xsoftc.xpt_topo_lock);
 5192 }
 5193 
 5194 void
 5195 xpt_unlock_buses(void)
 5196 {
 5197         mtx_unlock(&xsoftc.xpt_topo_lock);
 5198 }
 5199 
 5200 struct mtx *
 5201 xpt_path_mtx(struct cam_path *path)
 5202 {
 5203 
 5204         return (&path->device->device_mtx);
 5205 }
 5206 
 5207 static void
 5208 xpt_done_process(struct ccb_hdr *ccb_h)
 5209 {
 5210         struct cam_sim *sim;
 5211         struct cam_devq *devq;
 5212         struct mtx *mtx = NULL;
 5213 
 5214         if (ccb_h->flags & CAM_HIGH_POWER) {
 5215                 struct highpowerlist    *hphead;
 5216                 struct cam_ed           *device;
 5217 
 5218                 mtx_lock(&xsoftc.xpt_highpower_lock);
 5219                 hphead = &xsoftc.highpowerq;
 5220 
 5221                 device = STAILQ_FIRST(hphead);
 5222 
 5223                 /*
 5224                  * Increment the count since this command is done.
 5225                  */
 5226                 xsoftc.num_highpower++;
 5227 
 5228                 /*
 5229                  * Any high powered commands queued up?
 5230                  */
 5231                 if (device != NULL) {
 5232 
 5233                         STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
 5234                         mtx_unlock(&xsoftc.xpt_highpower_lock);
 5235 
 5236                         mtx_lock(&device->sim->devq->send_mtx);
 5237                         xpt_release_devq_device(device,
 5238                                          /*count*/1, /*runqueue*/TRUE);
 5239                         mtx_unlock(&device->sim->devq->send_mtx);
 5240                 } else
 5241                         mtx_unlock(&xsoftc.xpt_highpower_lock);
 5242         }
 5243 
 5244         sim = ccb_h->path->bus->sim;
 5245 
 5246         if (ccb_h->status & CAM_RELEASE_SIMQ) {
 5247                 xpt_release_simq(sim, /*run_queue*/FALSE);
 5248                 ccb_h->status &= ~CAM_RELEASE_SIMQ;
 5249         }
 5250 
 5251         if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 5252          && (ccb_h->status & CAM_DEV_QFRZN)) {
 5253                 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
 5254                 ccb_h->status &= ~CAM_DEV_QFRZN;
 5255         }
 5256 
 5257         devq = sim->devq;
 5258         if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 5259                 struct cam_ed *dev = ccb_h->path->device;
 5260 
 5261                 mtx_lock(&devq->send_mtx);
 5262                 devq->send_active--;
 5263                 devq->send_openings++;
 5264                 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 5265 
 5266                 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 5267                   && (dev->ccbq.dev_active == 0))) {
 5268                         dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
 5269                         xpt_release_devq_device(dev, /*count*/1,
 5270                                          /*run_queue*/FALSE);
 5271                 }
 5272 
 5273                 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 5274                   && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
 5275                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 5276                         xpt_release_devq_device(dev, /*count*/1,
 5277                                          /*run_queue*/FALSE);
 5278                 }
 5279 
 5280                 if (!device_is_queued(dev))
 5281                         (void)xpt_schedule_devq(devq, dev);
 5282                 xpt_run_devq(devq);
 5283                 mtx_unlock(&devq->send_mtx);
 5284 
 5285                 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
 5286                         mtx = xpt_path_mtx(ccb_h->path);
 5287                         mtx_lock(mtx);
 5288 
 5289                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 5290                          && (--dev->tag_delay_count == 0))
 5291                                 xpt_start_tags(ccb_h->path);
 5292                 }
 5293         }
 5294 
 5295         if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
 5296                 if (mtx == NULL) {
 5297                         mtx = xpt_path_mtx(ccb_h->path);
 5298                         mtx_lock(mtx);
 5299                 }
 5300         } else {
 5301                 if (mtx != NULL) {
 5302                         mtx_unlock(mtx);
 5303                         mtx = NULL;
 5304                 }
 5305         }
 5306 
 5307         /* Call the peripheral driver's callback */
 5308         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 5309         (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 5310         if (mtx != NULL)
 5311                 mtx_unlock(mtx);
 5312 }
 5313 
 5314 void
 5315 xpt_done_td(void *arg)
 5316 {
 5317         struct cam_doneq *queue = arg;
 5318         struct ccb_hdr *ccb_h;
 5319         STAILQ_HEAD(, ccb_hdr)  doneq;
 5320 
 5321         STAILQ_INIT(&doneq);
 5322         mtx_lock(&queue->cam_doneq_mtx);
 5323         while (1) {
 5324                 while (STAILQ_EMPTY(&queue->cam_doneq)) {
 5325                         queue->cam_doneq_sleep = 1;
 5326                         msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
 5327                             PRIBIO, "-", 0);
 5328                         queue->cam_doneq_sleep = 0;
 5329                 }
 5330                 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
 5331                 mtx_unlock(&queue->cam_doneq_mtx);
 5332 
 5333                 THREAD_NO_SLEEPING();
 5334                 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
 5335                         STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
 5336                         xpt_done_process(ccb_h);
 5337                 }
 5338                 THREAD_SLEEPING_OK();
 5339 
 5340                 mtx_lock(&queue->cam_doneq_mtx);
 5341         }
 5342 }

static void
camisr_runqueue(void)
{
        struct ccb_hdr *ccb_h;
        struct cam_doneq *queue;
        int i;

        /* Process global queues. */
        for (i = 0; i < cam_num_doneqs; i++) {
                queue = &cam_doneqs[i];
                mtx_lock(&queue->cam_doneq_mtx);
                while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
                        STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
                        mtx_unlock(&queue->cam_doneq_mtx);
                        xpt_done_process(ccb_h);
                        mtx_lock(&queue->cam_doneq_mtx);
                }
                mtx_unlock(&queue->cam_doneq_mtx);
        }
}
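
/*
 * Producer-side sketch (editor's addition): completed CCBs reach the
 * queues drained above via xpt_done().  This is a simplified rendering
 * of that function: the CCB is hashed onto one of the global done
 * queues, and the worker is woken only when the queue was empty and
 * the thread is known to be asleep.  Field and flag details in the
 * real function may differ.
 */
#if 0
void
xpt_done(union ccb *done_ccb)
{
        struct cam_doneq *queue;
        int run, hash;

        if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
                return;

        /* Spread completions across the available done queues. */
        hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
            done_ccb->ccb_h.target_lun) % cam_num_doneqs;
        queue = &cam_doneqs[hash];
        mtx_lock(&queue->cam_doneq_mtx);
        run = queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq);
        STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h,
            sim_links.stqe);
        mtx_unlock(&queue->cam_doneq_mtx);
        if (run)
                wakeup(&queue->cam_doneq);
}
#endif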

struct kv {
        uint32_t v;
        const char *name;
};

static struct kv map[] = {
        { XPT_NOOP, "XPT_NOOP" },
        { XPT_SCSI_IO, "XPT_SCSI_IO" },
        { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" },
        { XPT_GDEVLIST, "XPT_GDEVLIST" },
        { XPT_PATH_INQ, "XPT_PATH_INQ" },
        { XPT_REL_SIMQ, "XPT_REL_SIMQ" },
        { XPT_SASYNC_CB, "XPT_SASYNC_CB" },
        { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" },
        { XPT_SCAN_BUS, "XPT_SCAN_BUS" },
        { XPT_DEV_MATCH, "XPT_DEV_MATCH" },
        { XPT_DEBUG, "XPT_DEBUG" },
        { XPT_PATH_STATS, "XPT_PATH_STATS" },
        { XPT_GDEV_STATS, "XPT_GDEV_STATS" },
        { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" },
        { XPT_ASYNC, "XPT_ASYNC" },
        { XPT_ABORT, "XPT_ABORT" },
        { XPT_RESET_BUS, "XPT_RESET_BUS" },
        { XPT_RESET_DEV, "XPT_RESET_DEV" },
        { XPT_TERM_IO, "XPT_TERM_IO" },
        { XPT_SCAN_LUN, "XPT_SCAN_LUN" },
        { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" },
        { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" },
        { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" },
        { XPT_ATA_IO, "XPT_ATA_IO" },
        { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" },
        { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" },
        { XPT_NVME_IO, "XPT_NVME_IO" },
        { XPT_MMCSD_IO, "XPT_MMCSD_IO" },
        { XPT_SMP_IO, "XPT_SMP_IO" },
        { XPT_SCAN_TGT, "XPT_SCAN_TGT" },
        { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" },
        { XPT_ENG_INQ, "XPT_ENG_INQ" },
        { XPT_ENG_EXEC, "XPT_ENG_EXEC" },
        { XPT_EN_LUN, "XPT_EN_LUN" },
        { XPT_TARGET_IO, "XPT_TARGET_IO" },
        { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" },
        { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" },
        { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" },
        { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" },
        { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" },
        { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" },
        { 0, NULL }
};

static const char *
xpt_action_name(uint32_t action)
{
        static char buffer[32]; /* Only for unknown messages -- racy */
        struct kv *walker = map;

        while (walker->name != NULL) {
                if (walker->v == action)
                        return (walker->name);
                walker++;
        }

        snprintf(buffer, sizeof(buffer), "%#x", action);
        return (buffer);
}
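
/*
 * Usage sketch (editor's addition): xpt_action_name() exists for
 * tracing.  A debug call of roughly this shape, logging each CCB's
 * function code as it enters the transport, is the typical consumer:
 */
#if 0
        CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
            ("xpt_action: func %#x %s\n", ccb->ccb_h.func_code,
             xpt_action_name(ccb->ccb_h.func_code)));
#endif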
