FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c


    1 /*-
    2  * Implementation of the Common Access Method Transport (XPT) layer.
    3  *
    4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
    5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/systm.h>
   36 #include <sys/types.h>
   37 #include <sys/malloc.h>
   38 #include <sys/kernel.h>
   39 #include <sys/time.h>
   40 #include <sys/conf.h>
   41 #include <sys/fcntl.h>
   42 #include <sys/md5.h>
   43 #include <sys/interrupt.h>
   44 #include <sys/sbuf.h>
   45 #include <sys/taskqueue.h>
   46 
   47 #include <sys/lock.h>
   48 #include <sys/mutex.h>
   49 #include <sys/sysctl.h>
   50 #include <sys/kthread.h>
   51 
   52 #ifdef PC98
   53 #include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
   54 #endif
   55 
   56 #include <cam/cam.h>
   57 #include <cam/cam_ccb.h>
   58 #include <cam/cam_periph.h>
   59 #include <cam/cam_sim.h>
   60 #include <cam/cam_xpt.h>
   61 #include <cam/cam_xpt_sim.h>
   62 #include <cam/cam_xpt_periph.h>
   63 #include <cam/cam_debug.h>
   64 
   65 #include <cam/scsi/scsi_all.h>
   66 #include <cam/scsi/scsi_message.h>
   67 #include <cam/scsi/scsi_pass.h>
   68 #include <machine/stdarg.h>     /* for xpt_print below */
   69 #include "opt_cam.h"
   70 
    71 /* Data structures internal to the xpt layer */
   72 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
   73 
    74 /* Object for deferring XPT actions to a taskqueue */
   75 struct xpt_task {
   76         struct task     task;
   77         void            *data1;
   78         uintptr_t       data2;
   79 };
   80 
   81 /*
   82  * Definition of an async handler callback block.  These are used to add
   83  * SIMs and peripherals to the async callback lists.
   84  */
   85 struct async_node {
   86         SLIST_ENTRY(async_node) links;
   87         u_int32_t       event_enable;   /* Async Event enables */
   88         void            (*callback)(void *arg, u_int32_t code,
   89                                     struct cam_path *path, void *args);
   90         void            *callback_arg;
   91 };
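
/*
 * A minimal sketch of how a peripheral driver hooks into these lists:
 * fill out a ccb_setasync and push it through xpt_action().  The
 * handler must match the callback signature in async_node above.  The
 * function names and the softc argument are illustrative, not part of
 * this file.
 */
#ifdef notyet
static void
example_async_cb(void *arg, u_int32_t code, struct cam_path *path,
		 void *async_arg)
{
	/* arg is the callback_arg registered below. */
}

static void
example_register_async(struct cam_path *path, void *softc)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
	csa.callback = example_async_cb;
	csa.callback_arg = softc;
	xpt_action((union ccb *)&csa);
}
#endif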
   92 
   93 SLIST_HEAD(async_list, async_node);
   94 SLIST_HEAD(periph_list, cam_periph);
   95 
   96 /*
   97  * This is the maximum number of high powered commands (e.g. start unit)
   98  * that can be outstanding at a particular time.
   99  */
  100 #ifndef CAM_MAX_HIGHPOWER
  101 #define CAM_MAX_HIGHPOWER  4
  102 #endif
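
/*
 * Since this file includes opt_cam.h, the limit above can be raised at
 * build time from the kernel configuration, e.g. with
 * "options CAM_MAX_HIGHPOWER=8" (the value 8 is only an example).
 */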
  103 
  104 /*
  105  * Structure for queueing a device in a run queue.
  106  * There is one run queue for allocating new ccbs,
  107  * and another for sending ccbs to the controller.
  108  */
  109 struct cam_ed_qinfo {
  110         cam_pinfo pinfo;
  111         struct    cam_ed *device;
  112 };
  113 
  114 /*
  115  * The CAM EDT (Existing Device Table) contains the device information for
  116  * all devices for all busses in the system.  The table contains a
  117  * cam_ed structure for each device on the bus.
  118  */
  119 struct cam_ed {
  120         TAILQ_ENTRY(cam_ed) links;
  121         struct  cam_ed_qinfo alloc_ccb_entry;
  122         struct  cam_ed_qinfo send_ccb_entry;
  123         struct  cam_et   *target;
  124         struct  cam_sim  *sim;
  125         lun_id_t         lun_id;
  126         struct  camq drvq;              /*
  127                                          * Queue of type drivers wanting to do
  128                                          * work on this device.
  129                                          */
  130         struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
  131         struct  async_list asyncs;      /* Async callback info for this B/T/L */
  132         struct  periph_list periphs;    /* All attached devices */
  133         u_int   generation;             /* Generation number */
  134         struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
  135         struct  xpt_quirk_entry *quirk; /* Oddities about this device */
  136                                         /* Storage for the inquiry data */
  137         cam_proto        protocol;
  138         u_int            protocol_version;
  139         cam_xport        transport;
  140         u_int            transport_version;
  141         struct           scsi_inquiry_data inq_data;
  142         u_int8_t         inq_flags;     /*
  143                                          * Current settings for inquiry flags.
  144                                          * This allows us to override settings
  145                                          * like disconnection and tagged
  146                                          * queuing for a device.
  147                                          */
  148         u_int8_t         queue_flags;   /* Queue flags from the control page */
  149         u_int8_t         serial_num_len;
  150         u_int8_t        *serial_num;
  151         u_int32_t        qfrozen_cnt;
  152         u_int32_t        flags;
  153 #define CAM_DEV_UNCONFIGURED            0x01
  154 #define CAM_DEV_REL_TIMEOUT_PENDING     0x02
  155 #define CAM_DEV_REL_ON_COMPLETE         0x04
  156 #define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
  157 #define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
  158 #define CAM_DEV_TAG_AFTER_COUNT         0x20
  159 #define CAM_DEV_INQUIRY_DATA_VALID      0x40
  160 #define CAM_DEV_IN_DV                   0x80
  161 #define CAM_DEV_DV_HIT_BOTTOM           0x100
  162         u_int32_t        tag_delay_count;
  163 #define CAM_TAG_DELAY_COUNT             5
  164         u_int32_t        tag_saved_openings;
  165         u_int32_t        refcount;
  166         struct callout   callout;
  167 };
  168 
  169 /*
  170  * Each target is represented by an ET (Existing Target).  These
  171  * entries are created when a target is successfully probed with an
  172  * identify, and removed when a device fails to respond after a number
  173  * of retries, or a bus rescan finds the device missing.
  174  */
  175 struct cam_et {
  176         TAILQ_HEAD(, cam_ed) ed_entries;
  177         TAILQ_ENTRY(cam_et) links;
  178         struct  cam_eb  *bus;
  179         target_id_t     target_id;
  180         u_int32_t       refcount;
  181         u_int           generation;
  182         struct          timeval last_reset;
  183 };
  184 
  185 /*
  186  * Each bus is represented by an EB (Existing Bus).  These entries
  187  * are created by calls to xpt_bus_register and deleted by calls to
  188  * xpt_bus_deregister.
  189  */
  190 struct cam_eb {
  191         TAILQ_HEAD(, cam_et) et_entries;
  192         TAILQ_ENTRY(cam_eb)  links;
  193         path_id_t            path_id;
  194         struct cam_sim       *sim;
  195         struct timeval       last_reset;
  196         u_int32_t            flags;
  197 #define CAM_EB_RUNQ_SCHEDULED   0x01
  198         u_int32_t            refcount;
  199         u_int                generation;
  200         device_t             parent_dev;
  201 };
  202 
  203 struct cam_path {
  204         struct cam_periph *periph;
  205         struct cam_eb     *bus;
  206         struct cam_et     *target;
  207         struct cam_ed     *device;
  208 };
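
/*
 * A path names one bus/target/lun tuple in the EDT.  Paths are not
 * built by hand; consumers use the constructor declared later in this
 * file.  A hedged usage sketch (error handling trimmed, names
 * illustrative):
 */
#ifdef notyet
static int
example_make_path(path_id_t bus, target_id_t tgt, lun_id_t lun)
{
	struct cam_path *path;

	if (xpt_create_path(&path, /*periph*/NULL, bus, tgt, lun) !=
	    CAM_REQ_CMP)
		return (EINVAL);
	/* ... address CCBs to bus/target/device through 'path' ... */
	xpt_free_path(path);
	return (0);
}
#endif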
  209 
  210 struct xpt_quirk_entry {
  211         struct scsi_inquiry_pattern inq_pat;
  212         u_int8_t quirks;
  213 #define CAM_QUIRK_NOLUNS        0x01
  214 #define CAM_QUIRK_NOSERIAL      0x02
  215 #define CAM_QUIRK_HILUNS        0x04
  216 #define CAM_QUIRK_NOHILUNS      0x08
  217         u_int mintags;
  218         u_int maxtags;
  219 };
  220 
  221 static int cam_srch_hi = 0;
  222 TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
  223 static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
  224 SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
  225     sysctl_cam_search_luns, "I",
  226     "allow search above LUN 7 for SCSI3 and greater devices");
  227 
  228 #define CAM_SCSI2_MAXLUN        8
  229 /*
   230  * If we're not quirked to search only the first 8 luns
  231  * and we are either quirked to search above lun 8,
  232  * or we're > SCSI-2 and we've enabled hilun searching,
  233  * or we're > SCSI-2 and the last lun was a success,
  234  * we can look for luns above lun 8.
  235  */
  236 #define CAN_SRCH_HI_SPARSE(dv)                          \
  237   (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  238   && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  239   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))
  240 
  241 #define CAN_SRCH_HI_DENSE(dv)                           \
  242   (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  243   && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  244   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
  245 
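/*
 * Worked example: the "HITACHI OPEN*" arrays quirked CAM_QUIRK_HILUNS
 * below satisfy both predicates regardless of the sysctl, while an
 * unquirked SCSI-3 disk always satisfies CAN_SRCH_HI_DENSE but
 * satisfies CAN_SRCH_HI_SPARSE only when kern.cam.cam_srch_hi is
 * nonzero.
 */
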
  246 typedef enum {
  247         XPT_FLAG_OPEN           = 0x01
  248 } xpt_flags;
  249 
  250 struct xpt_softc {
  251         xpt_flags               flags;
  252         u_int32_t               xpt_generation;
  253 
  254         /* number of high powered commands that can go through right now */
  255         STAILQ_HEAD(highpowerlist, ccb_hdr)     highpowerq;
  256         int                     num_highpower;
  257 
  258         /* queue for handling async rescan requests. */
  259         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
  260 
  261         /* Registered busses */
  262         TAILQ_HEAD(,cam_eb)     xpt_busses;
  263         u_int                   bus_generation;
  264 
  265         struct intr_config_hook *xpt_config_hook;
  266 
  267         struct mtx              xpt_topo_lock;
  268         struct mtx              xpt_lock;
  269 };
  270 
  271 static const char quantum[] = "QUANTUM";
  272 static const char sony[] = "SONY";
  273 static const char west_digital[] = "WDIGTL";
  274 static const char samsung[] = "SAMSUNG";
  275 static const char seagate[] = "SEAGATE";
  276 static const char microp[] = "MICROP";
  277 
  278 static struct xpt_quirk_entry xpt_quirk_table[] =
  279 {
  280         {
  281                 /* Reports QUEUE FULL for temporary resource shortages */
  282                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
  283                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  284         },
  285         {
  286                 /* Reports QUEUE FULL for temporary resource shortages */
  287                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
  288                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  289         },
  290         {
  291                 /* Reports QUEUE FULL for temporary resource shortages */
  292                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
  293                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  294         },
  295         {
  296                 /* Broken tagged queuing drive */
  297                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
   298                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  299         },
  300         {
  301                 /* Broken tagged queuing drive */
  302                 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
   303                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  304         },
  305         {
  306                 /* Broken tagged queuing drive */
  307                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
   308                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  309         },
  310         {
  311                 /*
  312                  * Unfortunately, the Quantum Atlas III has the same
  313                  * problem as the Atlas II drives above.
  314                  * Reported by: "Johan Granlund" <johan@granlund.nu>
  315                  *
  316                  * For future reference, the drive with the problem was:
  317                  * QUANTUM QM39100TD-SW N1B0
  318                  *
  319                  * It's possible that Quantum will fix the problem in later
  320                  * firmware revisions.  If that happens, the quirk entry
  321                  * will need to be made specific to the firmware revisions
  322                  * with the problem.
  323                  *
  324                  */
  325                 /* Reports QUEUE FULL for temporary resource shortages */
  326                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
  327                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  328         },
  329         {
  330                 /*
  331                  * 18 Gig Atlas III, same problem as the 9G version.
  332                  * Reported by: Andre Albsmeier
  333                  *              <andre.albsmeier@mchp.siemens.de>
  334                  *
  335                  * For future reference, the drive with the problem was:
  336                  * QUANTUM QM318000TD-S N491
  337                  */
  338                 /* Reports QUEUE FULL for temporary resource shortages */
  339                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
  340                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  341         },
  342         {
  343                 /*
  344                  * Broken tagged queuing drive
  345                  * Reported by: Bret Ford <bford@uop.cs.uop.edu>
  346                  *         and: Martin Renters <martin@tdc.on.ca>
  347                  */
  348                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
   349                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  350         },
  351                 /*
  352                  * The Seagate Medalist Pro drives have very poor write
  353                  * performance with anything more than 2 tags.
  354                  *
  355                  * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
  356                  * Drive:  <SEAGATE ST36530N 1444>
  357                  *
  358                  * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
  359                  * Drive:  <SEAGATE ST34520W 1281>
  360                  *
  361                  * No one has actually reported that the 9G version
  362                  * (ST39140*) of the Medalist Pro has the same problem, but
  363                  * we're assuming that it does because the 4G and 6.5G
  364                  * versions of the drive are broken.
  365                  */
  366         {
  367                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
  368                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  369         },
  370         {
  371                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
  372                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  373         },
  374         {
  375                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
  376                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  377         },
  378         {
  379                 /*
  380                  * Slow when tagged queueing is enabled.  Write performance
  381                  * steadily drops off with more and more concurrent
  382                  * transactions.  Best sequential write performance with
  383                  * tagged queueing turned off and write caching turned on.
  384                  *
  385                  * PR:  kern/10398
  386                  * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
  387                  * Drive:  DCAS-34330 w/ "S65A" firmware.
  388                  *
  389                  * The drive with the problem had the "S65A" firmware
  390                  * revision, and has also been reported (by Stephen J.
  391                  * Roznowski <sjr@home.net>) for a drive with the "S61A"
  392                  * firmware revision.
  393                  *
  394                  * Although no one has reported problems with the 2 gig
  395                  * version of the DCAS drive, the assumption is that it
  396                  * has the same problems as the 4 gig version.  Therefore
   397                  * this quirk entry disables tagged queueing for all
  398                  * DCAS drives.
  399                  */
  400                 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
   401                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  402         },
  403         {
  404                 /* Broken tagged queuing drive */
  405                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
   406                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  407         },
  408         {
  409                 /* Broken tagged queuing drive */
  410                 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
   411                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  412         },
  413         {
  414                 /* This does not support other than LUN 0 */
  415                 { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
  416                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
  417         },
  418         {
  419                 /*
  420                  * Broken tagged queuing drive.
  421                  * Submitted by:
  422                  * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
  423                  * in PR kern/9535
  424                  */
  425                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
   426                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  427         },
  428         {
  429                 /*
  430                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
  431                  * 8MB/sec.)
  432                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
  433                  * Best performance with these drives is achieved with
  434                  * tagged queueing turned off, and write caching turned on.
  435                  */
  436                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
   437                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  438         },
  439         {
  440                 /*
  441                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
  442                  * 8MB/sec.)
  443                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
  444                  * Best performance with these drives is achieved with
  445                  * tagged queueing turned off, and write caching turned on.
  446                  */
  447                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
   448                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  449         },
  450         {
  451                 /*
  452                  * Doesn't handle queue full condition correctly,
  453                  * so we need to limit maxtags to what the device
  454                  * can handle instead of determining this automatically.
  455                  */
  456                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
  457                 /*quirks*/0, /*mintags*/2, /*maxtags*/32
  458         },
  459         {
  460                 /* Really only one LUN */
  461                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
   462                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  463         },
  464         {
  465                 /* I can't believe we need a quirk for DPT volumes. */
  466                 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
  467                 CAM_QUIRK_NOLUNS,
  468                 /*mintags*/0, /*maxtags*/255
  469         },
  470         {
  471                 /*
  472                  * Many Sony CDROM drives don't like multi-LUN probing.
  473                  */
  474                 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
   475                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  476         },
  477         {
  478                 /*
  479                  * This drive doesn't like multiple LUN probing.
  480                  * Submitted by:  Parag Patel <parag@cgt.com>
  481                  */
  482                 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
   483                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  484         },
  485         {
  486                 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
   487                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  488         },
  489         {
  490                 /*
  491                  * The 8200 doesn't like multi-lun probing, and probably
   492                  * doesn't like serial number requests either.
  493                  */
  494                 {
  495                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
  496                         "EXB-8200*", "*"
  497                 },
   498                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  499         },
  500         {
  501                 /*
  502                  * Let's try the same as above, but for a drive that says
  503                  * it's an IPL-6860 but is actually an EXB 8200.
  504                  */
  505                 {
  506                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
  507                         "IPL-6860*", "*"
  508                 },
   509                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  510         },
  511         {
  512                 /*
  513                  * These Hitachi drives don't like multi-lun probing.
  514                  * The PR submitter has a DK319H, but says that the Linux
  515                  * kernel has a similar work-around for the DK312 and DK314,
  516                  * so all DK31* drives are quirked here.
  517                  * PR:            misc/18793
  518                  * Submitted by:  Paul Haddad <paul@pth.com>
  519                  */
  520                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
  521                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
  522         },
  523         {
  524                 /*
   525                  * The Hitachi CJ series with J8A8 firmware apparently has
  526                  * problems with tagged commands.
  527                  * PR: 23536
  528                  * Reported by: amagai@nue.org
  529                  */
  530                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
   531                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  532         },
  533         {
  534                 /*
  535                  * These are the large storage arrays.
  536                  * Submitted by:  William Carrel <william.carrel@infospace.com>
  537                  */
  538                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
   539                 CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
  540         },
  541         {
  542                 /*
  543                  * This old revision of the TDC3600 is also SCSI-1, and
  544                  * hangs upon serial number probing.
  545                  */
  546                 {
  547                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
  548                         " TDC 3600", "U07:"
  549                 },
   550                 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
  551         },
  552         {
  553                 /*
   554                  * Would respond to all LUNs if asked for.
  555                  */
  556                 {
  557                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
  558                         "CP150", "*"
  559                 },
   560                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  561         },
  562         {
  563                 /*
   564                  * Would respond to all LUNs if asked for.
  565                  */
  566                 {
  567                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
  568                         "96X2*", "*"
  569                 },
   570                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  571         },
  572         {
  573                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
  574                 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
   575                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  576         },
  577         {
  578                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
  579                 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
   580                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  581         },
  582         {
  583                 /* TeraSolutions special settings for TRC-22 RAID */
  584                 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
  585                   /*quirks*/0, /*mintags*/55, /*maxtags*/255
  586         },
  587         {
  588                 /* Veritas Storage Appliance */
  589                 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
  590                   CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
  591         },
  592         {
  593                 /*
  594                  * Would respond to all LUNs.  Device type and removable
  595                  * flag are jumper-selectable.
  596                  */
  597                 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
  598                   "Tahiti 1", "*"
  599                 },
   600                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  601         },
  602         {
  603                 /* EasyRAID E5A aka. areca ARC-6010 */
  604                 { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
  605                   CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
  606         },
  607         {
  608                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
   609                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  610         },
  611         {
  612                 /* Default tagged queuing parameters for all devices */
  613                 {
  614                   T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
  615                   /*vendor*/"*", /*product*/"*", /*revision*/"*"
  616                 },
  617                 /*quirks*/0, /*mintags*/2, /*maxtags*/255
  618         },
  619 };
  620 
  621 static const int xpt_quirk_table_size =
  622         sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
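
/*
 * A sketch of how the table is consulted; the real lookup lives in
 * xpt_find_quirk() later in this file.  cam_quirkmatch() walks the
 * table comparing the device's INQUIRY data against each inq_pat, and
 * the catch-all last entry guarantees a match.  The wrapper name here
 * is illustrative.
 */
#ifdef notyet
static struct xpt_quirk_entry *
example_lookup_quirk(struct cam_ed *device)
{
	return ((struct xpt_quirk_entry *)
	    cam_quirkmatch((caddr_t)&device->inq_data,
			   (caddr_t)xpt_quirk_table,
			   xpt_quirk_table_size,
			   sizeof(*xpt_quirk_table), scsi_inquiry_match));
}
#endif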
  623 
  624 typedef enum {
  625         DM_RET_COPY             = 0x01,
  626         DM_RET_FLAG_MASK        = 0x0f,
  627         DM_RET_NONE             = 0x00,
  628         DM_RET_STOP             = 0x10,
  629         DM_RET_DESCEND          = 0x20,
  630         DM_RET_ERROR            = 0x30,
  631         DM_RET_ACTION_MASK      = 0xf0
  632 } dev_match_ret;
  633 
  634 typedef enum {
  635         XPT_DEPTH_BUS,
  636         XPT_DEPTH_TARGET,
  637         XPT_DEPTH_DEVICE,
  638         XPT_DEPTH_PERIPH
  639 } xpt_traverse_depth;
  640 
  641 struct xpt_traverse_config {
  642         xpt_traverse_depth      depth;
  643         void                    *tr_func;
  644         void                    *tr_arg;
  645 };
  646 
  647 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
  648 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
  649 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
  650 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
  651 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
  652 
  653 /* Transport layer configuration information */
  654 static struct xpt_softc xsoftc;
  655 
  656 /* Queues for our software interrupt handler */
  657 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
  658 typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
  659 static cam_simq_t cam_simq;
  660 static struct mtx cam_simq_lock;
  661 
  662 /* Pointers to software interrupt handlers */
  663 static void *cambio_ih;
  664 
  665 struct cam_periph *xpt_periph;
  666 
  667 static periph_init_t xpt_periph_init;
  668 
  669 static periph_init_t probe_periph_init;
  670 
  671 static struct periph_driver xpt_driver =
  672 {
  673         xpt_periph_init, "xpt",
  674         TAILQ_HEAD_INITIALIZER(xpt_driver.units)
  675 };
  676 
  677 static struct periph_driver probe_driver =
  678 {
  679         probe_periph_init, "probe",
  680         TAILQ_HEAD_INITIALIZER(probe_driver.units)
  681 };
  682 
  683 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
  684 PERIPHDRIVER_DECLARE(probe, probe_driver);
  685 
  686 
  687 static d_open_t xptopen;
  688 static d_close_t xptclose;
  689 static d_ioctl_t xptioctl;
  690 
  691 static struct cdevsw xpt_cdevsw = {
  692         .d_version =    D_VERSION,
  693         .d_flags =      0,
  694         .d_open =       xptopen,
  695         .d_close =      xptclose,
  696         .d_ioctl =      xptioctl,
  697         .d_name =       "xpt",
  698 };
  699 
  700 
  701 static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
  702 static void dead_sim_poll(struct cam_sim *sim);
  703 
  704 /* Dummy SIM that is used when the real one has gone. */
  705 static struct cam_sim cam_dead_sim = {
  706         .sim_action =   dead_sim_action,
  707         .sim_poll =     dead_sim_poll,
  708         .sim_name =     "dead_sim",
  709 };
  710 
  711 #define SIM_DEAD(sim)   ((sim) == &cam_dead_sim)
  712 
  713 
   714 /* Storage for debugging data structures */
  715 #ifdef  CAMDEBUG
  716 struct cam_path *cam_dpath;
  717 u_int32_t cam_dflags;
  718 u_int32_t cam_debug_delay;
  719 #endif
  720 
  721 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
  722 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
  723 #endif
  724 
  725 /*
  726  * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
  727  * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
  728  * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
  729  */
  730 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
  731     || defined(CAM_DEBUG_LUN)
  732 #ifdef CAMDEBUG
  733 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
  734     || !defined(CAM_DEBUG_LUN)
  735 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
  736         and CAM_DEBUG_LUN"
  737 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
  738 #else /* !CAMDEBUG */
  739 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
  740 #endif /* CAMDEBUG */
  741 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
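
/*
 * An illustrative kernel configuration that satisfies the constraints
 * above and compiles in debugging for bus 0, target 1, lun 0 (the
 * values and flag choice are examples only):
 *
 *	options	CAMDEBUG
 *	options	CAM_DEBUG_BUS=0
 *	options	CAM_DEBUG_TARGET=1
 *	options	CAM_DEBUG_LUN=0
 *	options	CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_CDB)
 */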
  742 
  743 /* Our boot-time initialization hook */
  744 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
  745 
  746 static moduledata_t cam_moduledata = {
  747         "cam",
  748         cam_module_event_handler,
  749         NULL
  750 };
  751 
  752 static int      xpt_init(void *);
  753 
  754 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
  755 MODULE_VERSION(cam, 1);
  756 
  757 
  758 static cam_status       xpt_compile_path(struct cam_path *new_path,
  759                                          struct cam_periph *perph,
  760                                          path_id_t path_id,
  761                                          target_id_t target_id,
  762                                          lun_id_t lun_id);
  763 
  764 static void             xpt_release_path(struct cam_path *path);
  765 
  766 static void             xpt_async_bcast(struct async_list *async_head,
  767                                         u_int32_t async_code,
  768                                         struct cam_path *path,
  769                                         void *async_arg);
  770 static void             xpt_dev_async(u_int32_t async_code,
  771                                       struct cam_eb *bus,
  772                                       struct cam_et *target,
  773                                       struct cam_ed *device,
  774                                       void *async_arg);
  775 static path_id_t xptnextfreepathid(void);
  776 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
  777 static union ccb *xpt_get_ccb(struct cam_ed *device);
  778 static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
  779                                   u_int32_t new_priority);
  780 static void      xpt_run_dev_allocq(struct cam_eb *bus);
  781 static void      xpt_run_dev_sendq(struct cam_eb *bus);
  782 static timeout_t xpt_release_devq_timeout;
  783 static void      xpt_release_simq_timeout(void *arg) __unused;
  784 static void      xpt_release_bus(struct cam_eb *bus);
  785 static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
  786                                          int run_queue);
  787 static struct cam_et*
  788                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
  789 static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
  790 static struct cam_ed*
  791                  xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
  792                                   lun_id_t lun_id);
  793 static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
  794                                     struct cam_ed *device);
  795 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
  796 static struct cam_eb*
  797                  xpt_find_bus(path_id_t path_id);
  798 static struct cam_et*
  799                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
  800 static struct cam_ed*
  801                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
  802 static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
  803 static void      xpt_scan_lun(struct cam_periph *periph,
  804                               struct cam_path *path, cam_flags flags,
  805                               union ccb *ccb);
  806 static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
  807 static xpt_busfunc_t    xptconfigbuscountfunc;
  808 static xpt_busfunc_t    xptconfigfunc;
  809 static void      xpt_config(void *arg);
  810 static xpt_devicefunc_t xptpassannouncefunc;
  811 static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
  812 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
  813 static void      xptpoll(struct cam_sim *sim);
  814 static void      camisr(void *);
  815 static void      camisr_runqueue(void *);
  816 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
  817                                     u_int num_patterns, struct cam_eb *bus);
  818 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
  819                                        u_int num_patterns,
  820                                        struct cam_ed *device);
  821 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
  822                                        u_int num_patterns,
  823                                        struct cam_periph *periph);
  824 static xpt_busfunc_t    xptedtbusfunc;
  825 static xpt_targetfunc_t xptedttargetfunc;
  826 static xpt_devicefunc_t xptedtdevicefunc;
  827 static xpt_periphfunc_t xptedtperiphfunc;
  828 static xpt_pdrvfunc_t   xptplistpdrvfunc;
  829 static xpt_periphfunc_t xptplistperiphfunc;
  830 static int              xptedtmatch(struct ccb_dev_match *cdm);
  831 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
  832 static int              xptbustraverse(struct cam_eb *start_bus,
  833                                        xpt_busfunc_t *tr_func, void *arg);
  834 static int              xpttargettraverse(struct cam_eb *bus,
  835                                           struct cam_et *start_target,
  836                                           xpt_targetfunc_t *tr_func, void *arg);
  837 static int              xptdevicetraverse(struct cam_et *target,
  838                                           struct cam_ed *start_device,
  839                                           xpt_devicefunc_t *tr_func, void *arg);
  840 static int              xptperiphtraverse(struct cam_ed *device,
  841                                           struct cam_periph *start_periph,
  842                                           xpt_periphfunc_t *tr_func, void *arg);
  843 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
  844                                         xpt_pdrvfunc_t *tr_func, void *arg);
  845 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
  846                                             struct cam_periph *start_periph,
  847                                             xpt_periphfunc_t *tr_func,
  848                                             void *arg);
  849 static xpt_busfunc_t    xptdefbusfunc;
  850 static xpt_targetfunc_t xptdeftargetfunc;
  851 static xpt_devicefunc_t xptdefdevicefunc;
  852 static xpt_periphfunc_t xptdefperiphfunc;
  853 static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
  854 static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
  855                                             void *arg);
  856 static xpt_devicefunc_t xptsetasyncfunc;
  857 static xpt_busfunc_t    xptsetasyncbusfunc;
  858 static cam_status       xptregister(struct cam_periph *periph,
  859                                     void *arg);
  860 static cam_status       proberegister(struct cam_periph *periph,
  861                                       void *arg);
  862 static void      probeschedule(struct cam_periph *probe_periph);
  863 static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
  864 static void      proberequestdefaultnegotiation(struct cam_periph *periph);
  865 static int       proberequestbackoff(struct cam_periph *periph,
  866                                      struct cam_ed *device);
  867 static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
  868 static void      probecleanup(struct cam_periph *periph);
  869 static void      xpt_find_quirk(struct cam_ed *device);
  870 static void      xpt_devise_transport(struct cam_path *path);
  871 static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
  872                                            struct cam_ed *device,
  873                                            int async_update);
  874 static void      xpt_toggle_tags(struct cam_path *path);
  875 static void      xpt_start_tags(struct cam_path *path);
  876 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
  877                                             struct cam_ed *dev);
  878 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
  879                                            struct cam_ed *dev);
  880 static __inline int periph_is_queued(struct cam_periph *periph);
  881 static __inline int device_is_alloc_queued(struct cam_ed *device);
  882 static __inline int device_is_send_queued(struct cam_ed *device);
  883 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
  884 
  885 static __inline int
  886 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
  887 {
  888         int retval;
  889 
  890         if (dev->ccbq.devq_openings > 0) {
  891                 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
  892                         cam_ccbq_resize(&dev->ccbq,
  893                                         dev->ccbq.dev_openings
  894                                         + dev->ccbq.dev_active);
  895                         dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
  896                 }
  897                 /*
  898                  * The priority of a device waiting for CCB resources
   899                  * is that of the highest priority peripheral driver
  900                  * enqueued.
  901                  */
  902                 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
  903                                           &dev->alloc_ccb_entry.pinfo,
  904                                           CAMQ_GET_HEAD(&dev->drvq)->priority);
  905         } else {
  906                 retval = 0;
  907         }
  908 
  909         return (retval);
  910 }
  911 
  912 static __inline int
  913 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
  914 {
  915         int     retval;
  916 
  917         if (dev->ccbq.dev_openings > 0) {
  918                 /*
  919                  * The priority of a device waiting for controller
   920                  * resources is that of the highest priority CCB
  921                  * enqueued.
  922                  */
  923                 retval =
  924                     xpt_schedule_dev(&bus->sim->devq->send_queue,
  925                                      &dev->send_ccb_entry.pinfo,
  926                                      CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
  927         } else {
  928                 retval = 0;
  929         }
  930         return (retval);
  931 }
  932 
  933 static __inline int
  934 periph_is_queued(struct cam_periph *periph)
  935 {
  936         return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
  937 }
  938 
  939 static __inline int
  940 device_is_alloc_queued(struct cam_ed *device)
  941 {
  942         return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  943 }
  944 
  945 static __inline int
  946 device_is_send_queued(struct cam_ed *device)
  947 {
  948         return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  949 }
  950 
  951 static __inline int
  952 dev_allocq_is_runnable(struct cam_devq *devq)
  953 {
  954         /*
  955          * Have work to do.
  956          * Have space to do more work.
  957          * Allowed to do work.
  958          */
  959         return ((devq->alloc_queue.qfrozen_cnt == 0)
  960              && (devq->alloc_queue.entries > 0)
  961              && (devq->alloc_openings > 0));
  962 }
  963 
  964 static void
   965 xpt_periph_init(void)
  966 {
  967         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
  968 }
  969 
  970 static void
   971 probe_periph_init(void)
  972 {
  973 }
  974 
  975 
  976 static void
  977 xptdone(struct cam_periph *periph, union ccb *done_ccb)
  978 {
  979         /* Caller will release the CCB */
  980         wakeup(&done_ccb->ccb_h.cbfcnp);
  981 }
  982 
  983 static int
  984 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
  985 {
  986 
  987         /*
  988          * Only allow read-write access.
  989          */
  990         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
  991                 return(EPERM);
  992 
  993         /*
  994          * We don't allow nonblocking access.
  995          */
  996         if ((flags & O_NONBLOCK) != 0) {
  997                 printf("%s: can't do nonblocking access\n", devtoname(dev));
  998                 return(ENODEV);
  999         }
 1000 
 1001         /* Mark ourselves open */
 1002         mtx_lock(&xsoftc.xpt_lock);
 1003         xsoftc.flags |= XPT_FLAG_OPEN;
 1004         mtx_unlock(&xsoftc.xpt_lock);
 1005 
 1006         return(0);
 1007 }
 1008 
 1009 static int
 1010 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
 1011 {
 1012 
 1013         /* Mark ourselves closed */
 1014         mtx_lock(&xsoftc.xpt_lock);
 1015         xsoftc.flags &= ~XPT_FLAG_OPEN;
 1016         mtx_unlock(&xsoftc.xpt_lock);
 1017 
 1018         return(0);
 1019 }
 1020 
 1021 /*
 1022  * Don't automatically grab the xpt softc lock here even though this is going
 1023  * through the xpt device.  The xpt device is really just a back door for
 1024  * accessing other devices and SIMs, so the right thing to do is to grab
 1025  * the appropriate SIM lock once the bus/SIM is located.
 1026  */
 1027 static int
 1028 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
 1029 {
 1030         int error;
 1031 
 1032         error = 0;
 1033 
 1034         switch(cmd) {
 1035         /*
 1036          * For the transport layer CAMIOCOMMAND ioctl, we really only want
 1037          * to accept CCB types that don't quite make sense to send through a
 1038          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
 1039          * in the CAM spec.
 1040          */
 1041         case CAMIOCOMMAND: {
 1042                 union ccb *ccb;
 1043                 union ccb *inccb;
 1044                 struct cam_eb *bus;
 1045 
 1046                 inccb = (union ccb *)addr;
 1047 
 1048                 bus = xpt_find_bus(inccb->ccb_h.path_id);
 1049                 if (bus == NULL) {
 1050                         error = EINVAL;
 1051                         break;
 1052                 }
 1053 
 1054                 switch(inccb->ccb_h.func_code) {
 1055                 case XPT_SCAN_BUS:
 1056                 case XPT_RESET_BUS:
 1057                         if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
 1058                          || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
 1059                                 error = EINVAL;
 1060                                 break;
 1061                         }
 1062                         /* FALLTHROUGH */
 1063                 case XPT_PATH_INQ:
 1064                 case XPT_ENG_INQ:
 1065                 case XPT_SCAN_LUN:
 1066 
 1067                         ccb = xpt_alloc_ccb();
 1068 
 1069                         CAM_SIM_LOCK(bus->sim);
 1070 
 1071                         /*
 1072                          * Create a path using the bus, target, and lun the
 1073                          * user passed in.
 1074                          */
 1075                         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
 1076                                             inccb->ccb_h.path_id,
 1077                                             inccb->ccb_h.target_id,
 1078                                             inccb->ccb_h.target_lun) !=
 1079                                             CAM_REQ_CMP){
 1080                                 error = EINVAL;
 1081                                 CAM_SIM_UNLOCK(bus->sim);
 1082                                 xpt_free_ccb(ccb);
 1083                                 break;
 1084                         }
 1085                         /* Ensure all of our fields are correct */
 1086                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
 1087                                       inccb->ccb_h.pinfo.priority);
 1088                         xpt_merge_ccb(ccb, inccb);
 1089                         ccb->ccb_h.cbfcnp = xptdone;
 1090                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
 1091                         bcopy(ccb, inccb, sizeof(union ccb));
 1092                         xpt_free_path(ccb->ccb_h.path);
 1093                         xpt_free_ccb(ccb);
 1094                         CAM_SIM_UNLOCK(bus->sim);
 1095                         break;
 1096 
 1097                 case XPT_DEBUG: {
 1098                         union ccb ccb;
 1099 
 1100                         /*
 1101                          * This is an immediate CCB, so it's okay to
 1102                          * allocate it on the stack.
 1103                          */
 1104 
 1105                         CAM_SIM_LOCK(bus->sim);
 1106 
 1107                         /*
 1108                          * Create a path using the bus, target, and lun the
 1109                          * user passed in.
 1110                          */
 1111                         if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
 1112                                             inccb->ccb_h.path_id,
 1113                                             inccb->ccb_h.target_id,
 1114                                             inccb->ccb_h.target_lun) !=
 1115                                             CAM_REQ_CMP){
 1116                                 error = EINVAL;
 1117                                 CAM_SIM_UNLOCK(bus->sim);
 1118                                 break;
 1119                         }
 1120                         /* Ensure all of our fields are correct */
 1121                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
 1122                                       inccb->ccb_h.pinfo.priority);
 1123                         xpt_merge_ccb(&ccb, inccb);
 1124                         ccb.ccb_h.cbfcnp = xptdone;
 1125                         xpt_action(&ccb);
 1126                         CAM_SIM_UNLOCK(bus->sim);
 1127                         bcopy(&ccb, inccb, sizeof(union ccb));
 1128                         xpt_free_path(ccb.ccb_h.path);
 1129                         break;
 1130 
 1131                 }
 1132                 case XPT_DEV_MATCH: {
 1133                         struct cam_periph_map_info mapinfo;
 1134                         struct cam_path *old_path;
 1135 
 1136                         /*
 1137                          * We can't deal with physical addresses for this
 1138                          * type of transaction.
 1139                          */
 1140                         if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
 1141                                 error = EINVAL;
 1142                                 break;
 1143                         }
 1144 
 1145                         /*
 1146                          * Save this in case the caller had it set to
 1147                          * something in particular.
 1148                          */
 1149                         old_path = inccb->ccb_h.path;
 1150 
 1151                         /*
 1152                          * We really don't need a path for the matching
 1153                          * code.  The path is needed because of the
 1154                          * debugging statements in xpt_action().  They
 1155                          * assume that the CCB has a valid path.
 1156                          */
 1157                         inccb->ccb_h.path = xpt_periph->path;
 1158 
 1159                         bzero(&mapinfo, sizeof(mapinfo));
 1160 
 1161                         /*
 1162                          * Map the pattern and match buffers into kernel
 1163                          * virtual address space.
 1164                          */
 1165                         error = cam_periph_mapmem(inccb, &mapinfo);
 1166 
 1167                         if (error) {
 1168                                 inccb->ccb_h.path = old_path;
 1169                                 break;
 1170                         }
 1171 
 1172                         /*
 1173                          * This is an immediate CCB, we can send it on directly.
 1174                          */
 1175                         xpt_action(inccb);
 1176 
 1177                         /*
 1178                          * Map the buffers back into user space.
 1179                          */
 1180                         cam_periph_unmapmem(inccb, &mapinfo);
 1181 
 1182                         inccb->ccb_h.path = old_path;
 1183 
 1184                         error = 0;
 1185                         break;
 1186                 }
 1187                 default:
 1188                         error = ENOTSUP;
 1189                         break;
 1190                 }
 1191                 xpt_release_bus(bus);
 1192                 break;
 1193         }
 1194         /*
  1195          * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
  1196          * with the peripheral driver name and unit number filled in.  The other
 1197          * fields don't really matter as input.  The passthrough driver name
 1198          * ("pass"), and unit number are passed back in the ccb.  The current
 1199          * device generation number, and the index into the device peripheral
 1200          * driver list, and the status are also passed back.  Note that
 1201          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
 1202          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
 1203          * (or rather should be) impossible for the device peripheral driver
 1204          * list to change since we look at the whole thing in one pass, and
 1205          * we do it with lock protection.
 1206          *
 1207          */
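        /*
         * A hedged userland sketch of this ioctl, roughly what
         * libcam's cam_lookup_pass() does (variable names ours):
         *
         *	union ccb ccb;
         *	int fd = open("/dev/xpt0", O_RDWR);
         *
         *	bzero(&ccb, sizeof(ccb));
         *	ccb.ccb_h.func_code = XPT_GDEVLIST;
         *	strlcpy(ccb.cgdl.periph_name, "da",
         *	    sizeof(ccb.cgdl.periph_name));
         *	ccb.cgdl.unit_number = 0;
         *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) != -1)
         *		printf("pass device: %s%d\n",
         *		    ccb.cgdl.periph_name, ccb.cgdl.unit_number);
         */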
 1208         case CAMGETPASSTHRU: {
 1209                 union ccb *ccb;
 1210                 struct cam_periph *periph;
 1211                 struct periph_driver **p_drv;
 1212                 char   *name;
 1213                 u_int unit;
 1214                 u_int cur_generation;
 1215                 int base_periph_found;
 1216                 int splbreaknum;
 1217 
 1218                 ccb = (union ccb *)addr;
 1219                 unit = ccb->cgdl.unit_number;
 1220                 name = ccb->cgdl.periph_name;
 1221                 /*
 1222                  * Every 100 devices, we want to drop our lock protection to
 1223                  * give the software interrupt handler a chance to run.
 1224                  * Most systems won't run into this check, but this should
 1225                  * avoid starvation in the software interrupt handler in
 1226                  * large systems.
 1227                  */
 1228                 splbreaknum = 100;
 1229 
 1230                 ccb = (union ccb *)addr;
 1231 
 1232                 base_periph_found = 0;
 1233 
 1234                 /*
 1235                  * Sanity check -- make sure we don't get a null peripheral
 1236                  * driver name.
 1237                  */
 1238                 if (*ccb->cgdl.periph_name == '\0') {
 1239                         error = EINVAL;
 1240                         break;
 1241                 }
 1242 
 1243                 /* Keep the list from changing while we traverse it */
 1244                 mtx_lock(&xsoftc.xpt_topo_lock);
 1245 ptstartover:
 1246                 cur_generation = xsoftc.xpt_generation;
 1247 
 1248                 /* first find our driver in the list of drivers */
 1249                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
 1250                         if (strcmp((*p_drv)->driver_name, name) == 0)
 1251                                 break;
 1252 
 1253                 if (*p_drv == NULL) {
 1254                         mtx_unlock(&xsoftc.xpt_topo_lock);
 1255                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1256                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1257                         *ccb->cgdl.periph_name = '\0';
 1258                         ccb->cgdl.unit_number = 0;
 1259                         error = ENOENT;
 1260                         break;
 1261                 }
 1262 
 1263                 /*
 1264                  * Run through every peripheral instance of this driver
 1265                  * and check to see whether it matches the unit passed
 1266                  * in by the user.  If it does, get out of the loops and
 1267                  * find the passthrough driver associated with that
 1268                  * peripheral driver.
 1269                  */
 1270                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
 1271                      periph = TAILQ_NEXT(periph, unit_links)) {
 1272 
 1273                         if (periph->unit_number == unit) {
 1274                                 break;
 1275                         } else if (--splbreaknum == 0) {
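                                        /*
                                         * Yield the topology lock briefly so
                                         * anyone blocked on it (notably the
                                         * CAM software interrupt handler)
                                         * gets a chance to run.
                                         */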
 1276                                 mtx_unlock(&xsoftc.xpt_topo_lock);
 1277                                 mtx_lock(&xsoftc.xpt_topo_lock);
 1278                                 splbreaknum = 100;
 1279                                 if (cur_generation != xsoftc.xpt_generation)
 1280                                        goto ptstartover;
 1281                         }
 1282                 }
 1283                 /*
 1284                  * If we found the peripheral driver that the user passed
 1285                  * in, go through all of the peripheral drivers for that
 1286                  * particular device and look for a passthrough driver.
 1287                  */
 1288                 if (periph != NULL) {
 1289                         struct cam_ed *device;
 1290                         int i;
 1291 
 1292                         base_periph_found = 1;
 1293                         device = periph->path->device;
 1294                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
 1295                              periph != NULL;
 1296                              periph = SLIST_NEXT(periph, periph_links), i++) {
 1297                                 /*
 1298                                  * Check to see whether we have a
 1299                                  * passthrough device or not.
 1300                                  */
 1301                                 if (strcmp(periph->periph_name, "pass") == 0) {
 1302                                         /*
 1303                                          * Fill in the getdevlist fields.
 1304                                          */
 1305                                         strcpy(ccb->cgdl.periph_name,
 1306                                                periph->periph_name);
 1307                                         ccb->cgdl.unit_number =
 1308                                                 periph->unit_number;
 1309                                         if (SLIST_NEXT(periph, periph_links))
 1310                                                 ccb->cgdl.status =
 1311                                                         CAM_GDEVLIST_MORE_DEVS;
 1312                                         else
 1313                                                 ccb->cgdl.status =
 1314                                                        CAM_GDEVLIST_LAST_DEVICE;
 1315                                         ccb->cgdl.generation =
 1316                                                 device->generation;
 1317                                         ccb->cgdl.index = i;
 1318                                         /*
 1319                                          * Fill in some CCB header fields
 1320                                          * that the user may want.
 1321                                          */
 1322                                         ccb->ccb_h.path_id =
 1323                                                 periph->path->bus->path_id;
 1324                                         ccb->ccb_h.target_id =
 1325                                                 periph->path->target->target_id;
 1326                                         ccb->ccb_h.target_lun =
 1327                                                 periph->path->device->lun_id;
 1328                                         ccb->ccb_h.status = CAM_REQ_CMP;
 1329                                         break;
 1330                                 }
 1331                         }
 1332                 }
 1333 
 1334                 /*
 1335                  * If the periph is null here, one of two things has
 1336                  * happened.  The first possibility is that we couldn't
 1337                  * find the unit number of the particular peripheral driver
 1338                  * that the user is asking about.  e.g. the user asks for
 1339                  * the passthrough driver for "da11".  We find the list of
 1340                  * "da" peripherals all right, but there is no unit 11.
 1341                  * The other possibility is that we went through the list
 1342                  * of peripheral drivers attached to the device structure,
 1343                  * but didn't find one with the name "pass".  Either way,
 1344                  * we return ENOENT, since we couldn't find something.
 1345                  */
 1346                 if (periph == NULL) {
 1347                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1348                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1349                         *ccb->cgdl.periph_name = '\0';
 1350                         ccb->cgdl.unit_number = 0;
 1351                         error = ENOENT;
 1352                         /*
 1353                          * It is unfortunate that this is even necessary,
 1354                          * but there are many, many clueless users out there.
 1355                          * If base_periph_found is set, the user is looking
 1356                          * for the passthrough driver, but doesn't have one
 1357                          * compiled into the kernel.
 1358                          */
 1359                         if (base_periph_found == 1) {
 1360                                 printf("xptioctl: pass driver is not in the "
 1361                                        "kernel\n");
 1362                                 printf("xptioctl: put \"device pass\" in "
 1363                                        "your kernel config file\n");
 1364                         }
 1365                 }
 1366                 mtx_unlock(&xsoftc.xpt_topo_lock);
 1367                 break;
 1368                 }
 1369         default:
 1370                 error = ENOTTY;
 1371                 break;
 1372         }
 1373 
 1374         return(error);
 1375 }
 1376 
 1377 static int
 1378 cam_module_event_handler(module_t mod, int what, void *arg)
 1379 {
 1380         int error;
 1381 
 1382         switch (what) {
 1383         case MOD_LOAD:
 1384                 if ((error = xpt_init(NULL)) != 0)
 1385                         return (error);
 1386                 break;
 1387         case MOD_UNLOAD:
 1388                 return (EBUSY);
 1389         default:
 1390                 return (EOPNOTSUPP);
 1391         }
 1392 
 1393         return (0);
 1394 }
 1395 
 1396 /* thread to handle bus rescans */
 1397 static void
 1398 xpt_scanner_thread(void *dummy)
 1399 {
 1400         cam_isrq_t      queue;
 1401         union ccb       *ccb;
 1402         struct cam_sim  *sim;
 1403 
 1404         for (;;) {
 1405                 /*
 1406                  * Wait for a rescan request to come in.  When it does, splice
 1407                  * it onto a queue from local storage so that the xpt lock
 1408                  * doesn't need to be held while the requests are being
 1409                  * processed.
 1410                  */
 1411                 xpt_lock_buses();
 1412                 msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
 1413                     "ccb_scanq", 0);
 1414                 TAILQ_INIT(&queue);
 1415                 TAILQ_CONCAT(&queue, &xsoftc.ccb_scanq, sim_links.tqe);
 1416                 xpt_unlock_buses();
 1417 
 1418                 while ((ccb = (union ccb *)TAILQ_FIRST(&queue)) != NULL) {
 1419                         TAILQ_REMOVE(&queue, &ccb->ccb_h, sim_links.tqe);
 1420 
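                              /*
                               * Each rescan CCB is executed with the owning
                               * SIM's lock held, as xpt_action() and
                               * cam_periph_runccb() expect.
                               */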
 1421                         sim = ccb->ccb_h.path->bus->sim;
 1422                         CAM_SIM_LOCK(sim);
 1423 
 1424                         ccb->ccb_h.func_code = XPT_SCAN_BUS;
 1425                         ccb->ccb_h.cbfcnp = xptdone;
 1426                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
 1427                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
 1428                         xpt_free_path(ccb->ccb_h.path);
 1429                         xpt_free_ccb(ccb);
 1430                         CAM_SIM_UNLOCK(sim);
 1431                 }
 1432         }
 1433 }
 1434 
 1435 void
 1436 xpt_rescan(union ccb *ccb)
 1437 {
 1438         struct ccb_hdr *hdr;
 1439 
 1440         /*
 1441          * Don't make duplicate entries for the same paths.
 1442          */
 1443         xpt_lock_buses();
 1444         TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
 1445                 if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
 1446                         xpt_unlock_buses();
 1447                         xpt_print(ccb->ccb_h.path, "rescan already queued\n");
 1448                         xpt_free_path(ccb->ccb_h.path);
 1449                         xpt_free_ccb(ccb);
 1450                         return;
 1451                 }
 1452         }
 1453         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
 1454         wakeup(&xsoftc.ccb_scanq);
 1455         xpt_unlock_buses();
 1456 }
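      /*
       * A hedged sketch of how a caller typically queues a rescan (the
       * path_id variable is illustrative; xpt_rescan() consumes both the
       * CCB and its path, which the scanner thread frees):
       *
       *      union ccb *ccb = xpt_alloc_ccb_nowait();
       *
       *      if (ccb == NULL)
       *              return;
       *      if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
       *          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
       *              xpt_free_ccb(ccb);
       *              return;
       *      }
       *      xpt_rescan(ccb);
       */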
 1457 
 1458 /* Functions accessed by the peripheral drivers */
 1459 static int
 1460 xpt_init(void *dummy)
 1461 {
 1462         struct cam_sim *xpt_sim;
 1463         struct cam_path *path;
 1464         struct cam_devq *devq;
 1465         cam_status status;
 1466 
 1467         TAILQ_INIT(&xsoftc.xpt_busses);
 1468         TAILQ_INIT(&cam_simq);
 1469         TAILQ_INIT(&xsoftc.ccb_scanq);
 1470         STAILQ_INIT(&xsoftc.highpowerq);
 1471         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
 1472 
 1473         mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
 1474         mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
 1475         mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
 1476 
 1477         /*
 1478          * The xpt layer is, itself, the equivalent of a SIM.
 1479          * Allow 16 ccbs in the ccb pool for it.  This should
 1480          * give decent parallelism when we probe busses and
 1481          * perform other XPT functions.
 1482          */
 1483         devq = cam_simq_alloc(16);
 1484         xpt_sim = cam_sim_alloc(xptaction,
 1485                                 xptpoll,
 1486                                 "xpt",
 1487                                 /*softc*/NULL,
 1488                                 /*unit*/0,
 1489                                 /*mtx*/&xsoftc.xpt_lock,
 1490                                 /*max_dev_transactions*/0,
 1491                                 /*max_tagged_dev_transactions*/0,
 1492                                 devq);
 1493         if (xpt_sim == NULL)
 1494                 return (ENOMEM);
 1495 
 1496         xpt_sim->max_ccbs = 16;
 1497 
 1498         mtx_lock(&xsoftc.xpt_lock);
 1499         if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
 1500                 printf("xpt_init: xpt_bus_register failed with status %#x,"
 1501                        " failing attach\n", status);
                      mtx_unlock(&xsoftc.xpt_lock);
 1502                 return (EINVAL);
 1503         }
 1504 
 1505         /*
 1506          * Looking at the XPT from the SIM layer, the XPT is
 1507          * the equivalent of a peripheral driver.  Allocate
 1508          * a peripheral driver entry for us.
 1509          */
 1510         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
 1511                                       CAM_TARGET_WILDCARD,
 1512                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
 1513                 printf("xpt_init: xpt_create_path failed with status %#x,"
 1514                        " failing attach\n", status);
                      mtx_unlock(&xsoftc.xpt_lock);
 1515                 return (EINVAL);
 1516         }
 1517 
 1518         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
 1519                          path, NULL, 0, xpt_sim);
 1520         xpt_free_path(path);
 1521         mtx_unlock(&xsoftc.xpt_lock);
 1522 
 1523         /*
 1524          * Register a callback for when interrupts are enabled.
 1525          */
 1526         xsoftc.xpt_config_hook =
 1527             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
 1528                                               M_CAMXPT, M_NOWAIT | M_ZERO);
 1529         if (xsoftc.xpt_config_hook == NULL) {
 1530                 printf("xpt_init: Cannot malloc config hook "
 1531                        "- failing attach\n");
 1532                 return (ENOMEM);
 1533         }
 1534 
 1535         xsoftc.xpt_config_hook->ich_func = xpt_config;
 1536         if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
 1537                 free(xsoftc.xpt_config_hook, M_CAMXPT);
 1538                 printf("xpt_init: config_intrhook_establish failed "
 1539                        "- failing attach\n");
                      return (ENOMEM);
 1540         }
 1541 
 1542         /* fire up rescan thread */
 1543         if (kthread_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
 1544                 printf("xpt_init: failed to create rescan thread\n");
 1545         }
 1546         /* Install our software interrupt handlers */
 1547         swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
 1548 
 1549         return (0);
 1550 }
 1551 
 1552 static cam_status
 1553 xptregister(struct cam_periph *periph, void *arg)
 1554 {
 1555         struct cam_sim *xpt_sim;
 1556 
 1557         if (periph == NULL) {
 1558                 printf("xptregister: periph was NULL!!\n");
 1559                 return(CAM_REQ_CMP_ERR);
 1560         }
 1561 
 1562         xpt_sim = (struct cam_sim *)arg;
 1563         xpt_sim->softc = periph;
 1564         xpt_periph = periph;
 1565         periph->softc = NULL;
 1566 
 1567         return(CAM_REQ_CMP);
 1568 }
 1569 
 1570 int32_t
 1571 xpt_add_periph(struct cam_periph *periph)
 1572 {
 1573         struct cam_ed *device;
 1574         int32_t  status;
 1575         struct periph_list *periph_head;
 1576 
 1577         mtx_assert(periph->sim->mtx, MA_OWNED);
 1578 
 1579         device = periph->path->device;
 1580 
 1581         status = CAM_REQ_CMP;
 1582 
 1583         if (device != NULL) {
 1584                 periph_head = &device->periphs;
 1585 
 1586                 /*
 1587                  * Make room for this peripheral
 1588                  * so it will fit in the queue
 1589                  * when it's scheduled to run
 1590                  */
 1591                 status = camq_resize(&device->drvq,
 1592                                      device->drvq.array_size + 1);
 1593 
 1594                 device->generation++;
 1595 
 1596                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
 1597         }
 1598 
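              /*
               * Bump the topology generation so that any in-flight
               * traversal (e.g. the CAMGETPASSTHRU loop above) notices
               * the change and restarts.
               */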
 1599         mtx_lock(&xsoftc.xpt_topo_lock);
 1600         xsoftc.xpt_generation++;
 1601         mtx_unlock(&xsoftc.xpt_topo_lock);
 1602 
 1603         return (status);
 1604 }
 1605 
 1606 void
 1607 xpt_remove_periph(struct cam_periph *periph)
 1608 {
 1609         struct cam_ed *device;
 1610 
 1611         mtx_assert(periph->sim->mtx, MA_OWNED);
 1612 
 1613         device = periph->path->device;
 1614 
 1615         if (device != NULL) {
 1616                 struct periph_list *periph_head;
 1617 
 1618                 periph_head = &device->periphs;
 1619 
 1620                 /* Release the slot for this peripheral */
 1621                 camq_resize(&device->drvq, device->drvq.array_size - 1);
 1622 
 1623                 device->generation++;
 1624 
 1625                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
 1626         }
 1627 
 1628         mtx_lock(&xsoftc.xpt_topo_lock);
 1629         xsoftc.xpt_generation++;
 1630         mtx_unlock(&xsoftc.xpt_topo_lock);
 1631 }
 1632 
 1634 void
 1635 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1636 {
 1637         struct  ccb_pathinq cpi;
 1638         struct  ccb_trans_settings cts;
 1639         struct  cam_path *path;
 1640         u_int   speed;
 1641         u_int   freq;
 1642         u_int   mb;
 1643 
 1644         mtx_assert(periph->sim->mtx, MA_OWNED);
 1645 
 1646         path = periph->path;
 1647         /*
 1648          * The SIM lock is held (asserted above), which keeps this
 1649          * announcement from being interleaved with other output.
 1650          */
 1651         printf("%s%d at %s%d bus %d target %d lun %d\n",
 1652                periph->periph_name, periph->unit_number,
 1653                path->bus->sim->sim_name,
 1654                path->bus->sim->unit_number,
 1655                path->bus->sim->bus_id,
 1656                path->target->target_id,
 1657                path->device->lun_id);
 1658         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1659         scsi_print_inquiry(&path->device->inq_data);
 1660         if (bootverbose && path->device->serial_num_len > 0) {
 1661                 /* Don't wrap the screen  - print only the first 60 chars */
 1662                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1663                        periph->unit_number, path->device->serial_num);
 1664         }
 1665         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 1666         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 1667         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 1668         xpt_action((union ccb*)&cts);
 1669         if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 1670                 return;
 1671         }
 1672 
 1673         /* Ask the SIM for its base transfer speed */
 1674         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 1675         cpi.ccb_h.func_code = XPT_PATH_INQ;
 1676         xpt_action((union ccb *)&cpi);
 1677 
 1678         speed = cpi.base_transfer_speed;
 1679         freq = 0;
 1680         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1681                 struct  ccb_trans_settings_spi *spi;
 1682 
 1683                 spi = &cts.xport_specific.spi;
 1684                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
 1685                   && spi->sync_offset != 0) {
 1686                         freq = scsi_calc_syncsrate(spi->sync_period);
 1687                         speed = freq;
 1688                 }
 1689 
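                      /*
                       * Scale by the bus width.  For example (numbers
                       * illustrative), an 80000KHz sync rate on a wide
                       * (16 bit, bus_width == 1) bus yields
                       * 80000 * (1 << 1) == 160000KB/s, i.e. Ultra160.
                       */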
 1690                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
 1691                         speed *= (0x01 << spi->bus_width);
 1692         }
 1693 
 1694         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1695                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
 1696                 if (fc->valid & CTS_FC_VALID_SPEED) {
 1697                         speed = fc->bitrate;
 1698                 }
 1699         }
 1700 
 1701         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
 1702                 struct  ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
 1703                 if (sas->valid & CTS_SAS_VALID_SPEED) {
 1704                         speed = sas->bitrate;
 1705                 }
 1706         }
 1707 
 1708         mb = speed / 1000;
 1709         if (mb > 0)
 1710                 printf("%s%d: %d.%03dMB/s transfers",
 1711                        periph->periph_name, periph->unit_number,
 1712                        mb, speed % 1000);
 1713         else
 1714                 printf("%s%d: %dKB/s transfers", periph->periph_name,
 1715                        periph->unit_number, speed);
 1716         /* Report additional information about SPI connections */
 1717         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1718                 struct  ccb_trans_settings_spi *spi;
 1719 
 1720                 spi = &cts.xport_specific.spi;
 1721                 if (freq != 0) {
 1722                         printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
 1723                                freq % 1000,
 1724                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
 1725                              ? " DT" : "",
 1726                                spi->sync_offset);
 1727                 }
 1728                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
 1729                  && spi->bus_width > 0) {
 1730                         if (freq != 0) {
 1731                                 printf(", ");
 1732                         } else {
 1733                                 printf(" (");
 1734                         }
 1735                         printf("%dbit)", 8 * (0x01 << spi->bus_width));
 1736                 } else if (freq != 0) {
 1737                         printf(")");
 1738                 }
 1739         }
 1740         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1741                 struct  ccb_trans_settings_fc *fc;
 1742 
 1743                 fc = &cts.xport_specific.fc;
 1744                 if (fc->valid & CTS_FC_VALID_WWNN)
 1745                         printf(" WWNN 0x%llx", (long long) fc->wwnn);
 1746                 if (fc->valid & CTS_FC_VALID_WWPN)
 1747                         printf(" WWPN 0x%llx", (long long) fc->wwpn);
 1748                 if (fc->valid & CTS_FC_VALID_PORT)
 1749                         printf(" PortID 0x%x", fc->port);
 1750         }
 1751 
 1752         if (path->device->inq_flags & SID_CmdQue
 1753          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1754                 printf("\n%s%d: Command Queueing Enabled",
 1755                        periph->periph_name, periph->unit_number);
 1756         }
 1757         printf("\n");
 1758 
 1759         /*
 1760          * We only want to print the caller's announce string if they've
 1761          * passed one in.
 1762          */
 1763         if (announce_string != NULL)
 1764                 printf("%s%d: %s\n", periph->periph_name,
 1765                        periph->unit_number, announce_string);
 1766 }
 1767 
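      /*
       * The match helpers below return a dev_match_ret bitmask: an action
       * in DM_RET_ACTION_MASK (DM_RET_STOP to stop descending the EDT,
       * DM_RET_DESCEND to keep descending, DM_RET_ERROR on failure),
       * possibly combined with DM_RET_COPY, which tells the traversal code
       * to copy the current node out to the user's match buffer.
       */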
 1768 static dev_match_ret
 1769 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1770             struct cam_eb *bus)
 1771 {
 1772         dev_match_ret retval;
 1773         int i;
 1774 
 1775         retval = DM_RET_NONE;
 1776 
 1777         /*
 1778          * If we aren't given something to match against, that's an error.
 1779          */
 1780         if (bus == NULL)
 1781                 return(DM_RET_ERROR);
 1782 
 1783         /*
 1784          * If there are no match entries, then this bus matches no
 1785          * matter what.
 1786          */
 1787         if ((patterns == NULL) || (num_patterns == 0))
 1788                 return(DM_RET_DESCEND | DM_RET_COPY);
 1789 
 1790         for (i = 0; i < num_patterns; i++) {
 1791                 struct bus_match_pattern *cur_pattern;
 1792 
 1793                 /*
 1794                  * If the pattern in question isn't for a bus node, we
 1795                  * aren't interested.  However, we do indicate to the
 1796                  * calling routine that we should continue descending the
 1797                  * tree, since the user wants to match against lower-level
 1798                  * EDT elements.
 1799                  */
 1800                 if (patterns[i].type != DEV_MATCH_BUS) {
 1801                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1802                                 retval |= DM_RET_DESCEND;
 1803                         continue;
 1804                 }
 1805 
 1806                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1807 
 1808                 /*
 1809                  * If they want to match any bus node, we give them this
 1810                  * bus node.
 1811                  */
 1812                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1813                         /* set the copy flag */
 1814                         retval |= DM_RET_COPY;
 1815 
 1816                         /*
 1817                          * If we've already decided on an action, go ahead
 1818                          * and return.
 1819                          */
 1820                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1821                                 return(retval);
 1822                 }
 1823 
 1824                 /*
 1825                  * Not sure why someone would do this...
 1826                  */
 1827                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1828                         continue;
 1829 
 1830                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1831                  && (cur_pattern->path_id != bus->path_id))
 1832                         continue;
 1833 
 1834                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1835                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1836                         continue;
 1837 
 1838                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1839                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1840                         continue;
 1841 
 1842                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1843                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1844                              DEV_IDLEN) != 0))
 1845                         continue;
 1846 
 1847                 /*
 1848                  * If we get to this point, the user definitely wants
 1849                  * information on this bus.  So tell the caller to copy the
 1850                  * data out.
 1851                  */
 1852                 retval |= DM_RET_COPY;
 1853 
 1854                 /*
 1855                  * If the return action has been set to descend, then we
 1856                  * know that we've already seen a non-bus matching
 1857                  * expression, therefore we need to further descend the tree.
 1858                  * This won't change by continuing around the loop, so we
 1859                  * go ahead and return.  If we haven't seen a non-bus
 1860                  * matching expression, we keep going around the loop until
 1861                  * we exhaust the matching expressions.  We'll set the stop
 1862                  * flag once we fall out of the loop.
 1863                  */
 1864                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1865                         return(retval);
 1866         }
 1867 
 1868         /*
 1869          * If the return action hasn't been set to descend yet, that means
 1870          * we haven't seen anything other than bus matching patterns.  So
 1871          * tell the caller to stop descending the tree -- the user doesn't
 1872          * want to match against lower level tree elements.
 1873          */
 1874         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1875                 retval |= DM_RET_STOP;
 1876 
 1877         return(retval);
 1878 }
 1879 
 1880 static dev_match_ret
 1881 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1882                struct cam_ed *device)
 1883 {
 1884         dev_match_ret retval;
 1885         int i;
 1886 
 1887         retval = DM_RET_NONE;
 1888 
 1889         /*
 1890          * If we aren't given something to match against, that's an error.
 1891          */
 1892         if (device == NULL)
 1893                 return(DM_RET_ERROR);
 1894 
 1895         /*
 1896          * If there are no match entries, then this device matches no
 1897          * matter what.
 1898          */
 1899         if ((patterns == NULL) || (num_patterns == 0))
 1900                 return(DM_RET_DESCEND | DM_RET_COPY);
 1901 
 1902         for (i = 0; i < num_patterns; i++) {
 1903                 struct device_match_pattern *cur_pattern;
 1904 
 1905                 /*
 1906                  * If the pattern in question isn't for a device node, we
 1907                  * aren't interested.
 1908                  */
 1909                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1910                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1911                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1912                                 retval |= DM_RET_DESCEND;
 1913                         continue;
 1914                 }
 1915 
 1916                 cur_pattern = &patterns[i].pattern.device_pattern;
 1917 
 1918                 /*
 1919                  * If they want to match any device node, we give them any
 1920                  * device node.
 1921                  */
 1922                 if (cur_pattern->flags == DEV_MATCH_ANY) {
 1923                         /* set the copy flag */
 1924                         retval |= DM_RET_COPY;
 1925 
 1927                         /*
 1928                          * If we've already decided on an action, go ahead
 1929                          * and return.
 1930                          */
 1931                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1932                                 return(retval);
 1933                 }
 1934 
 1935                 /*
 1936                  * Not sure why someone would do this...
 1937                  */
 1938                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1939                         continue;
 1940 
 1941                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1942                  && (cur_pattern->path_id != device->target->bus->path_id))
 1943                         continue;
 1944 
 1945                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1946                  && (cur_pattern->target_id != device->target->target_id))
 1947                         continue;
 1948 
 1949                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1950                  && (cur_pattern->target_lun != device->lun_id))
 1951                         continue;
 1952 
 1953                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1954                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1955                                     (caddr_t)&cur_pattern->inq_pat,
 1956                                     1, sizeof(cur_pattern->inq_pat),
 1957                                     scsi_static_inquiry_match) == NULL))
 1958                         continue;
 1959 
 1960                 /*
 1961                  * If we get to this point, the user definitely wants
 1962                  * information on this device.  So tell the caller to copy
 1963                  * the data out.
 1964                  */
 1965                 retval |= DM_RET_COPY;
 1966 
 1967                 /*
 1968                  * If the return action has been set to descend, then we
 1969                  * know that we've already seen a peripheral matching
 1970                  * expression, therefore we need to further descend the tree.
 1971                  * This won't change by continuing around the loop, so we
 1972                  * go ahead and return.  If we haven't seen a peripheral
 1973                  * matching expression, we keep going around the loop until
 1974                  * we exhaust the matching expressions.  We'll set the stop
 1975                  * flag once we fall out of the loop.
 1976                  */
 1977                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1978                         return(retval);
 1979         }
 1980 
 1981         /*
 1982          * If the return action hasn't been set to descend yet, that means
 1983          * we haven't seen any peripheral matching patterns.  So tell the
 1984          * caller to stop descending the tree -- the user doesn't want to
 1985          * match against lower level tree elements.
 1986          */
 1987         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1988                 retval |= DM_RET_STOP;
 1989 
 1990         return(retval);
 1991 }
 1992 
 1993 /*
 1994  * Match a single peripheral against any number of match patterns.
 1995  */
 1996 static dev_match_ret
 1997 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1998                struct cam_periph *periph)
 1999 {
 2000         dev_match_ret retval;
 2001         int i;
 2002 
 2003         /*
 2004          * If we aren't given something to match against, that's an error.
 2005          */
 2006         if (periph == NULL)
 2007                 return(DM_RET_ERROR);
 2008 
 2009         /*
 2010          * If there are no match entries, then this peripheral matches no
 2011          * matter what.
 2012          */
 2013         if ((patterns == NULL) || (num_patterns == 0))
 2014                 return(DM_RET_STOP | DM_RET_COPY);
 2015 
 2016         /*
 2017          * There aren't any nodes below a peripheral node, so there's no
 2018          * reason to descend the tree any further.
 2019          */
 2020         retval = DM_RET_STOP;
 2021 
 2022         for (i = 0; i < num_patterns; i++) {
 2023                 struct periph_match_pattern *cur_pattern;
 2024 
 2025                 /*
 2026                  * If the pattern in question isn't for a peripheral, we
 2027                  * aren't interested.
 2028                  */
 2029                 if (patterns[i].type != DEV_MATCH_PERIPH)
 2030                         continue;
 2031 
 2032                 cur_pattern = &patterns[i].pattern.periph_pattern;
 2033 
 2034                 /*
 2035                  * If they want to match on anything, then we will do so.
 2036                  */
 2037                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 2038                         /* set the copy flag */
 2039                         retval |= DM_RET_COPY;
 2040 
 2041                         /*
 2042                          * We've already set the return action to stop,
 2043                          * since there are no nodes below peripherals in
 2044                          * the tree.
 2045                          */
 2046                         return(retval);
 2047                 }
 2048 
 2049                 /*
 2050                  * Not sure why someone would do this...
 2051                  */
 2052                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 2053                         continue;
 2054 
 2055                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 2056                  && (cur_pattern->path_id != periph->path->bus->path_id))
 2057                         continue;
 2058 
 2059                 /*
 2060                  * For the target and lun IDs, we have to make sure the
 2061                  * target and lun pointers aren't NULL.  The xpt peripheral
 2062                  * has a wildcard target and device.
 2063                  */
 2064                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 2065                  && ((periph->path->target == NULL)
 2066                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 2067                         continue;
 2068 
 2069                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 2070                  && ((periph->path->device == NULL)
 2071                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 2072                         continue;
 2073 
 2074                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 2075                  && (cur_pattern->unit_number != periph->unit_number))
 2076                         continue;
 2077 
 2078                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 2079                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 2080                              DEV_IDLEN) != 0))
 2081                         continue;
 2082 
 2083                 /*
 2084                  * If we get to this point, the user definitely wants
 2085                  * information on this peripheral.  So tell the caller to
 2086                  * copy the data out.
 2087                  */
 2088                 retval |= DM_RET_COPY;
 2089 
 2090                 /*
 2091                  * The return action has already been set to stop, since
 2092                  * peripherals don't have any nodes below them in the EDT.
 2093                  */
 2094                 return(retval);
 2095         }
 2096 
 2097         /*
 2098          * If we get to this point, the peripheral that was passed in
 2099          * doesn't match any of the patterns.
 2100          */
 2101         return(retval);
 2102 }
 2103 
 2104 static int
 2105 xptedtbusfunc(struct cam_eb *bus, void *arg)
 2106 {
 2107         struct ccb_dev_match *cdm;
 2108         dev_match_ret retval;
 2109 
 2110         cdm = (struct ccb_dev_match *)arg;
 2111 
 2112         /*
 2113          * If our position is for something deeper in the tree, that means
 2114          * that we've already seen this node.  So, we keep going down.
 2115          */
 2116         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2117          && (cdm->pos.cookie.bus == bus)
 2118          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2119          && (cdm->pos.cookie.target != NULL))
 2120                 retval = DM_RET_DESCEND;
 2121         else
 2122                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 2123 
 2124         /*
 2125          * If we got an error, bail out of the search.
 2126          */
 2127         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2128                 cdm->status = CAM_DEV_MATCH_ERROR;
 2129                 return(0);
 2130         }
 2131 
 2132         /*
 2133          * If the copy flag is set, copy this bus out.
 2134          */
 2135         if (retval & DM_RET_COPY) {
 2136                 int spaceleft, j;
 2137 
 2138                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2139                         sizeof(struct dev_match_result));
 2140 
 2141                 /*
 2142                  * If we don't have enough space to put in another
 2143                  * match result, save our position and tell the
 2144                  * user there are more devices to check.
 2145                  */
 2146                 if (spaceleft < sizeof(struct dev_match_result)) {
 2147                         bzero(&cdm->pos, sizeof(cdm->pos));
 2148                         cdm->pos.position_type =
 2149                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 2150 
 2151                         cdm->pos.cookie.bus = bus;
 2152                         cdm->pos.generations[CAM_BUS_GENERATION] =
 2153                                 xsoftc.bus_generation;
 2154                         cdm->status = CAM_DEV_MATCH_MORE;
 2155                         return(0);
 2156                 }
 2157                 j = cdm->num_matches;
 2158                 cdm->num_matches++;
 2159                 cdm->matches[j].type = DEV_MATCH_BUS;
 2160                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 2161                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 2162                 cdm->matches[j].result.bus_result.unit_number =
 2163                         bus->sim->unit_number;
 2164                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 2165                         bus->sim->sim_name, DEV_IDLEN);
 2166         }
 2167 
 2168         /*
 2169          * If the user is only interested in busses, there's no
 2170          * reason to descend to the next level in the tree.
 2171          */
 2172         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2173                 return(1);
 2174 
 2175         /*
 2176          * If there is a target generation recorded, check it to
 2177          * make sure the target list hasn't changed.
 2178          */
 2179         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2180          && (bus == cdm->pos.cookie.bus)
 2181          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2182          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 2183          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 2184              bus->generation)) {
 2185                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2186                 return(0);
 2187         }
 2188 
 2189         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2190          && (cdm->pos.cookie.bus == bus)
 2191          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2192          && (cdm->pos.cookie.target != NULL))
 2193                 return(xpttargettraverse(bus,
 2194                                         (struct cam_et *)cdm->pos.cookie.target,
 2195                                          xptedttargetfunc, arg));
 2196         else
 2197                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 2198 }
 2199 
 2200 static int
 2201 xptedttargetfunc(struct cam_et *target, void *arg)
 2202 {
 2203         struct ccb_dev_match *cdm;
 2204 
 2205         cdm = (struct ccb_dev_match *)arg;
 2206 
 2207         /*
 2208          * If there is a device list generation recorded, check it to
 2209          * make sure the device list hasn't changed.
 2210          */
 2211         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2212          && (cdm->pos.cookie.bus == target->bus)
 2213          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2214          && (cdm->pos.cookie.target == target)
 2215          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2216          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 2217          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 2218              target->generation)) {
 2219                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2220                 return(0);
 2221         }
 2222 
 2223         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2224          && (cdm->pos.cookie.bus == target->bus)
 2225          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2226          && (cdm->pos.cookie.target == target)
 2227          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2228          && (cdm->pos.cookie.device != NULL))
 2229                 return(xptdevicetraverse(target,
 2230                                         (struct cam_ed *)cdm->pos.cookie.device,
 2231                                          xptedtdevicefunc, arg));
 2232         else
 2233                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 2234 }
 2235 
 2236 static int
 2237 xptedtdevicefunc(struct cam_ed *device, void *arg)
 2238 {
 2239 
 2240         struct ccb_dev_match *cdm;
 2241         dev_match_ret retval;
 2242 
 2243         cdm = (struct ccb_dev_match *)arg;
 2244 
 2245         /*
 2246          * If our position is for something deeper in the tree, that means
 2247          * that we've already seen this node.  So, we keep going down.
 2248          */
 2249         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2250          && (cdm->pos.cookie.device == device)
 2251          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2252          && (cdm->pos.cookie.periph != NULL))
 2253                 retval = DM_RET_DESCEND;
 2254         else
 2255                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 2256                                         device);
 2257 
 2258         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2259                 cdm->status = CAM_DEV_MATCH_ERROR;
 2260                 return(0);
 2261         }
 2262 
 2263         /*
 2264          * If the copy flag is set, copy this device out.
 2265          */
 2266         if (retval & DM_RET_COPY) {
 2267                 int spaceleft, j;
 2268 
 2269                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2270                         sizeof(struct dev_match_result));
 2271 
 2272                 /*
 2273                  * If we don't have enough space to put in another
 2274                  * match result, save our position and tell the
 2275                  * user there are more devices to check.
 2276                  */
 2277                 if (spaceleft < sizeof(struct dev_match_result)) {
 2278                         bzero(&cdm->pos, sizeof(cdm->pos));
 2279                         cdm->pos.position_type =
 2280                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2281                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 2282 
 2283                         cdm->pos.cookie.bus = device->target->bus;
 2284                         cdm->pos.generations[CAM_BUS_GENERATION] =
 2285                                 xsoftc.bus_generation;
 2286                         cdm->pos.cookie.target = device->target;
 2287                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2288                                 device->target->bus->generation;
 2289                         cdm->pos.cookie.device = device;
 2290                         cdm->pos.generations[CAM_DEV_GENERATION] =
 2291                                 device->target->generation;
 2292                         cdm->status = CAM_DEV_MATCH_MORE;
 2293                         return(0);
 2294                 }
 2295                 j = cdm->num_matches;
 2296                 cdm->num_matches++;
 2297                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 2298                 cdm->matches[j].result.device_result.path_id =
 2299                         device->target->bus->path_id;
 2300                 cdm->matches[j].result.device_result.target_id =
 2301                         device->target->target_id;
 2302                 cdm->matches[j].result.device_result.target_lun =
 2303                         device->lun_id;
 2304                 bcopy(&device->inq_data,
 2305                       &cdm->matches[j].result.device_result.inq_data,
 2306                       sizeof(struct scsi_inquiry_data));
 2307 
 2308                 /* Let the user know whether this device is unconfigured */
 2309                 if (device->flags & CAM_DEV_UNCONFIGURED)
 2310                         cdm->matches[j].result.device_result.flags =
 2311                                 DEV_RESULT_UNCONFIGURED;
 2312                 else
 2313                         cdm->matches[j].result.device_result.flags =
 2314                                 DEV_RESULT_NOFLAG;
 2315         }
 2316 
 2317         /*
 2318          * If the user isn't interested in peripherals, don't descend
 2319          * the tree any further.
 2320          */
 2321         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2322                 return(1);
 2323 
 2324         /*
 2325          * If there is a peripheral list generation recorded, make sure
 2326          * it hasn't changed.
 2327          */
 2328         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2329          && (device->target->bus == cdm->pos.cookie.bus)
 2330          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2331          && (device->target == cdm->pos.cookie.target)
 2332          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2333          && (device == cdm->pos.cookie.device)
 2334          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2335          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2336          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2337              device->generation)){
 2338                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2339                 return(0);
 2340         }
 2341 
 2342         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2343          && (cdm->pos.cookie.bus == device->target->bus)
 2344          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2345          && (cdm->pos.cookie.target == device->target)
 2346          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2347          && (cdm->pos.cookie.device == device)
 2348          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2349          && (cdm->pos.cookie.periph != NULL))
 2350                 return(xptperiphtraverse(device,
 2351                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2352                                 xptedtperiphfunc, arg));
 2353         else
 2354                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 2355 }
 2356 
 2357 static int
 2358 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 2359 {
 2360         struct ccb_dev_match *cdm;
 2361         dev_match_ret retval;
 2362 
 2363         cdm = (struct ccb_dev_match *)arg;
 2364 
 2365         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2366 
 2367         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2368                 cdm->status = CAM_DEV_MATCH_ERROR;
 2369                 return(0);
 2370         }
 2371 
 2372         /*
 2373          * If the copy flag is set, copy this peripheral out.
 2374          */
 2375         if (retval & DM_RET_COPY) {
 2376                 int spaceleft, j;
 2377 
 2378                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2379                         sizeof(struct dev_match_result));
 2380 
 2381                 /*
 2382                  * If we don't have enough space to put in another
 2383                  * match result, save our position and tell the
 2384                  * user there are more devices to check.
 2385                  */
 2386                 if (spaceleft < sizeof(struct dev_match_result)) {
 2387                         bzero(&cdm->pos, sizeof(cdm->pos));
 2388                         cdm->pos.position_type =
 2389                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2390                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 2391                                 CAM_DEV_POS_PERIPH;
 2392 
 2393                         cdm->pos.cookie.bus = periph->path->bus;
 2394                         cdm->pos.generations[CAM_BUS_GENERATION] =
 2395                                 xsoftc.bus_generation;
 2396                         cdm->pos.cookie.target = periph->path->target;
 2397                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2398                                 periph->path->bus->generation;
 2399                         cdm->pos.cookie.device = periph->path->device;
 2400                         cdm->pos.generations[CAM_DEV_GENERATION] =
 2401                                 periph->path->target->generation;
 2402                         cdm->pos.cookie.periph = periph;
 2403                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2404                                 periph->path->device->generation;
 2405                         cdm->status = CAM_DEV_MATCH_MORE;
 2406                         return(0);
 2407                 }
 2408 
 2409                 j = cdm->num_matches;
 2410                 cdm->num_matches++;
 2411                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2412                 cdm->matches[j].result.periph_result.path_id =
 2413                         periph->path->bus->path_id;
 2414                 cdm->matches[j].result.periph_result.target_id =
 2415                         periph->path->target->target_id;
 2416                 cdm->matches[j].result.periph_result.target_lun =
 2417                         periph->path->device->lun_id;
 2418                 cdm->matches[j].result.periph_result.unit_number =
 2419                         periph->unit_number;
 2420                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2421                         periph->periph_name, DEV_IDLEN);
 2422         }
 2423 
 2424         return(1);
 2425 }
 2426 
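      /*
       * Top-level EDT match.  The walk is resumable: when the result
       * buffer fills up, the helpers record the current position and
       * generation counts in cdm->pos and set CAM_DEV_MATCH_MORE; the
       * caller reissues the XPT_DEV_MATCH CCB with cdm->pos intact to
       * pick up where the previous call left off.
       */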
 2427 static int
 2428 xptedtmatch(struct ccb_dev_match *cdm)
 2429 {
 2430         int ret;
 2431 
 2432         cdm->num_matches = 0;
 2433 
 2434         /*
 2435          * Check the bus list generation.  If it has changed, the user
 2436          * needs to reset everything and start over.
 2437          */
 2438         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2439          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 2440          && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
 2441                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2442                 return(0);
 2443         }
 2444 
 2445         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2446          && (cdm->pos.cookie.bus != NULL))
 2447                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 2448                                      xptedtbusfunc, cdm);
 2449         else
 2450                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 2451 
 2452         /*
 2453          * If we get back 0, that means that we had to stop before fully
 2454          * traversing the EDT.  It also means that one of the subroutines
 2455          * has set the status field to the proper value.  If we get back 1,
 2456          * we've fully traversed the EDT and copied out any matching entries.
 2457          */
 2458         if (ret == 1)
 2459                 cdm->status = CAM_DEV_MATCH_LAST;
 2460 
 2461         return(ret);
 2462 }
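      /*
       * A hedged userland sketch of driving this interface (illustrative;
       * camcontrol(8) does roughly this via CAMIOCOMMAND on /dev/xpt0):
       *
       *      struct dev_match_result matches[64];
       *      union ccb ccb;
       *
       *      bzero(&ccb, sizeof(ccb));
       *      ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
       *      ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
       *      ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
       *      ccb.ccb_h.func_code = XPT_DEV_MATCH;
       *      ccb.cdm.match_buf_len = sizeof(matches);
       *      ccb.cdm.matches = matches;
       *      ccb.cdm.num_patterns = 0;       (0 patterns == match all)
       *      ccb.cdm.pattern_buf_len = 0;
       *      do {
       *              if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
       *                      break;
       *              (consume ccb.cdm.num_matches results here)
       *      } while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
       */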
 2463 
 2464 static int
 2465 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 2466 {
 2467         struct ccb_dev_match *cdm;
 2468 
 2469         cdm = (struct ccb_dev_match *)arg;
 2470 
 2471         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2472          && (cdm->pos.cookie.pdrv == pdrv)
 2473          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2474          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2475          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2476              (*pdrv)->generation)) {
 2477                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2478                 return(0);
 2479         }
 2480 
 2481         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2482          && (cdm->pos.cookie.pdrv == pdrv)
 2483          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2484          && (cdm->pos.cookie.periph != NULL))
 2485                 return(xptpdperiphtraverse(pdrv,
 2486                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2487                                 xptplistperiphfunc, arg));
 2488         else
 2489                 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
 2490 }
 2491 
 2492 static int
 2493 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 2494 {
 2495         struct ccb_dev_match *cdm;
 2496         dev_match_ret retval;
 2497 
 2498         cdm = (struct ccb_dev_match *)arg;
 2499 
 2500         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2501 
 2502         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2503                 cdm->status = CAM_DEV_MATCH_ERROR;
 2504                 return(0);
 2505         }
 2506 
 2507         /*
 2508          * If the copy flag is set, copy this peripheral out.
 2509          */
 2510         if (retval & DM_RET_COPY) {
 2511                 int spaceleft, j;
 2512 
 2513                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2514                         sizeof(struct dev_match_result));
 2515 
 2516                 /*
 2517                  * If we don't have enough space to put in another
 2518                  * match result, save our position and tell the
 2519                  * user there are more devices to check.
 2520                  */
 2521                 if (spaceleft < sizeof(struct dev_match_result)) {
 2522                         struct periph_driver **pdrv;
 2523 
 2524                         pdrv = NULL;
 2525                         bzero(&cdm->pos, sizeof(cdm->pos));
 2526                         cdm->pos.position_type =
 2527                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 2528                                 CAM_DEV_POS_PERIPH;
 2529 
 2530                         /*
 2531                          * This may look a bit nonsensical, but it is
 2532                          * actually quite logical.  There are very few
 2533                          * peripheral drivers, and bloating every peripheral
 2534                          * structure with a pointer back to its parent
 2535                          * peripheral driver linker set entry would cost
 2536                          * more in the long run than doing this quick lookup.
 2537                          */
 2538                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 2539                                 if (strcmp((*pdrv)->driver_name,
 2540                                     periph->periph_name) == 0)
 2541                                         break;
 2542                         }
 2543 
 2544                         if (*pdrv == NULL) {
 2545                                 cdm->status = CAM_DEV_MATCH_ERROR;
 2546                                 return(0);
 2547                         }
 2548 
 2549                         cdm->pos.cookie.pdrv = pdrv;
 2550                         /*
 2551                          * The periph generation slot does double duty, as
 2552                          * does the periph pointer slot.  They are used for
 2553                          * both edt and pdrv lookups and positioning.
 2554                          */
 2555                         cdm->pos.cookie.periph = periph;
 2556                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2557                                 (*pdrv)->generation;
 2558                         cdm->status = CAM_DEV_MATCH_MORE;
 2559                         return(0);
 2560                 }
 2561 
 2562                 j = cdm->num_matches;
 2563                 cdm->num_matches++;
 2564                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2565                 cdm->matches[j].result.periph_result.path_id =
 2566                         periph->path->bus->path_id;
 2567 
 2568                 /*
 2569                  * The transport layer peripheral doesn't have a target or
 2570                  * lun.
 2571                  */
 2572                 if (periph->path->target)
 2573                         cdm->matches[j].result.periph_result.target_id =
 2574                                 periph->path->target->target_id;
 2575                 else
 2576                         cdm->matches[j].result.periph_result.target_id = -1;
 2577 
 2578                 if (periph->path->device)
 2579                         cdm->matches[j].result.periph_result.target_lun =
 2580                                 periph->path->device->lun_id;
 2581                 else
 2582                         cdm->matches[j].result.periph_result.target_lun = -1;
 2583 
 2584                 cdm->matches[j].result.periph_result.unit_number =
 2585                         periph->unit_number;
 2586                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2587                         periph->periph_name, DEV_IDLEN);
 2588         }
 2589 
 2590         return(1);
 2591 }
 2592 
 2593 static int
 2594 xptperiphlistmatch(struct ccb_dev_match *cdm)
 2595 {
 2596         int ret;
 2597 
 2598         cdm->num_matches = 0;
 2599 
 2600         /*
 2601          * At the equivalent point in the edt traversal function, we
 2602          * check the bus list generation to make sure that no busses
 2603          * have been added or removed since the last XPT_DEV_MATCH ccb.
 2604          * For the peripheral driver list traversal function, however, we
 2605          * don't have to worry about new peripheral driver types coming or
 2606          * going; they're in a linker set, and therefore can't change
 2607          * without a recompile.
 2608          */
 2609 
 2610         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2611          && (cdm->pos.cookie.pdrv != NULL))
 2612                 ret = xptpdrvtraverse(
 2613                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2614                                 xptplistpdrvfunc, cdm);
 2615         else
 2616                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2617 
 2618         /*
 2619          * If we get back 0, that means that we had to stop before fully
 2620          * traversing the peripheral driver tree.  It also means that one of
 2621          * the subroutines has set the status field to the proper value.  If
 2622          * we get back 1, we've fully traversed the peripheral driver tree
 2623          * and copied out any matching entries.
 2624          */
 2625         if (ret == 1)
 2626                 cdm->status = CAM_DEV_MATCH_LAST;
 2627 
 2628         return(ret);
 2629 }
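/*
 * Illustrative sketch of the userland side of the CAM_DEV_MATCH_MORE
 * protocol implemented above (hypothetical example_* names; error handling
 * omitted): a consumer such as camcontrol(8) issues XPT_DEV_MATCH ccbs via
 * the CAMIOCOMMAND ioctl on the xpt device, resubmitting the same ccb --
 * whose pos field preserves the traversal position -- until the match
 * status is no longer CAM_DEV_MATCH_MORE.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>

static void
example_walk_matches(void)
{
        union ccb ccb;
        struct dev_match_result matches[64];
        int fd;

        if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
                return;
        memset(&ccb, 0, sizeof(ccb));
        ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
        ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
        ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
        ccb.ccb_h.func_code = XPT_DEV_MATCH;
        ccb.cdm.match_buf_len = sizeof(matches);
        ccb.cdm.matches = matches;
        ccb.cdm.num_patterns = 0;       /* match everything */
        do {
                if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
                        break;
                /* consume ccb.cdm.matches[0..ccb.cdm.num_matches - 1] */
        } while (ccb.ccb_h.status == CAM_REQ_CMP &&
            ccb.cdm.status == CAM_DEV_MATCH_MORE);
        close(fd);
}
#endif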
 2630 
 2631 static int
 2632 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2633 {
 2634         struct cam_eb *bus, *next_bus;
 2635         int retval;
 2636 
 2637         retval = 1;
 2638 
 2639         mtx_lock(&xsoftc.xpt_topo_lock);
 2640         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
 2641              bus != NULL;
 2642              bus = next_bus) {
 2643                 next_bus = TAILQ_NEXT(bus, links);
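                /*
                 * The next pointer was cached above while the topology
                 * lock was held; now drop that lock before taking the
                 * SIM lock so the two are never held together, and
                 * re-take it before the next step through the list.
                 */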
 2644 
 2645                 mtx_unlock(&xsoftc.xpt_topo_lock);
 2646                 CAM_SIM_LOCK(bus->sim);
 2647                 retval = tr_func(bus, arg);
 2648                 CAM_SIM_UNLOCK(bus->sim);
 2649                 if (retval == 0)
 2650                         return(retval);
 2651                 mtx_lock(&xsoftc.xpt_topo_lock);
 2652         }
 2653         mtx_unlock(&xsoftc.xpt_topo_lock);
 2654 
 2655         return(retval);
 2656 }
 2657 
 2658 static int
 2659 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2660                   xpt_targetfunc_t *tr_func, void *arg)
 2661 {
 2662         struct cam_et *target, *next_target;
 2663         int retval;
 2664 
 2665         retval = 1;
 2666         for (target = (start_target ? start_target :
 2667                        TAILQ_FIRST(&bus->et_entries));
 2668              target != NULL; target = next_target) {
 2669 
 2670                 next_target = TAILQ_NEXT(target, links);
 2671 
 2672                 retval = tr_func(target, arg);
 2673 
 2674                 if (retval == 0)
 2675                         return(retval);
 2676         }
 2677 
 2678         return(retval);
 2679 }
 2680 
 2681 static int
 2682 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2683                   xpt_devicefunc_t *tr_func, void *arg)
 2684 {
 2685         struct cam_ed *device, *next_device;
 2686         int retval;
 2687 
 2688         retval = 1;
 2689         for (device = (start_device ? start_device :
 2690                        TAILQ_FIRST(&target->ed_entries));
 2691              device != NULL;
 2692              device = next_device) {
 2693 
 2694                 next_device = TAILQ_NEXT(device, links);
 2695 
 2696                 retval = tr_func(device, arg);
 2697 
 2698                 if (retval == 0)
 2699                         return(retval);
 2700         }
 2701 
 2702         return(retval);
 2703 }
 2704 
 2705 static int
 2706 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2707                   xpt_periphfunc_t *tr_func, void *arg)
 2708 {
 2709         struct cam_periph *periph, *next_periph;
 2710         int retval;
 2711 
 2712         retval = 1;
 2713 
 2714         for (periph = (start_periph ? start_periph :
 2715                        SLIST_FIRST(&device->periphs));
 2716              periph != NULL;
 2717              periph = next_periph) {
 2718 
 2719                 next_periph = SLIST_NEXT(periph, periph_links);
 2720 
 2721                 retval = tr_func(periph, arg);
 2722                 if (retval == 0)
 2723                         return(retval);
 2724         }
 2725 
 2726         return(retval);
 2727 }
 2728 
 2729 static int
 2730 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2731                 xpt_pdrvfunc_t *tr_func, void *arg)
 2732 {
 2733         struct periph_driver **pdrv;
 2734         int retval;
 2735 
 2736         retval = 1;
 2737 
 2738         /*
 2739          * We don't traverse the peripheral driver list like we do the
 2740          * other lists, because it is a linker set, and therefore cannot be
 2741          * changed during runtime.  If the peripheral driver list is ever
 2742          * re-done to be something other than a linker set (i.e. it can
 2743          * change while the system is running), the list traversal should
 2744          * be modified to work like the other traversal functions.
 2745          */
 2746         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2747              *pdrv != NULL; pdrv++) {
 2748                 retval = tr_func(pdrv, arg);
 2749 
 2750                 if (retval == 0)
 2751                         return(retval);
 2752         }
 2753 
 2754         return(retval);
 2755 }
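/*
 * For reference, a sketch of how a peripheral driver lands in the
 * periph_drivers linker set at link time (condensed from what a driver
 * such as scsi_da.c does via PERIPHDRIVER_DECLARE() from cam_periph.h):
 */
#if 0
static struct periph_driver dadriver =
{
        dainit, "da",
        TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(da, dadriver);
#endif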
 2756 
 2757 static int
 2758 xptpdperiphtraverse(struct periph_driver **pdrv,
 2759                     struct cam_periph *start_periph,
 2760                     xpt_periphfunc_t *tr_func, void *arg)
 2761 {
 2762         struct cam_periph *periph, *next_periph;
 2763         int retval;
 2764 
 2765         retval = 1;
 2766 
 2767         for (periph = (start_periph ? start_periph :
 2768              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2769              periph = next_periph) {
 2770 
 2771                 next_periph = TAILQ_NEXT(periph, unit_links);
 2772 
 2773                 retval = tr_func(periph, arg);
 2774                 if (retval == 0)
 2775                         return(retval);
 2776         }
 2777         return(retval);
 2778 }
 2779 
 2780 static int
 2781 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2782 {
 2783         struct xpt_traverse_config *tr_config;
 2784 
 2785         tr_config = (struct xpt_traverse_config *)arg;
 2786 
 2787         if (tr_config->depth == XPT_DEPTH_BUS) {
 2788                 xpt_busfunc_t *tr_func;
 2789 
 2790                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2791 
 2792                 return(tr_func(bus, tr_config->tr_arg));
 2793         } else
 2794                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2795 }
 2796 
 2797 static int
 2798 xptdeftargetfunc(struct cam_et *target, void *arg)
 2799 {
 2800         struct xpt_traverse_config *tr_config;
 2801 
 2802         tr_config = (struct xpt_traverse_config *)arg;
 2803 
 2804         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2805                 xpt_targetfunc_t *tr_func;
 2806 
 2807                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2808 
 2809                 return(tr_func(target, tr_config->tr_arg));
 2810         } else
 2811                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2812 }
 2813 
 2814 static int
 2815 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2816 {
 2817         struct xpt_traverse_config *tr_config;
 2818 
 2819         tr_config = (struct xpt_traverse_config *)arg;
 2820 
 2821         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2822                 xpt_devicefunc_t *tr_func;
 2823 
 2824                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2825 
 2826                 return(tr_func(device, tr_config->tr_arg));
 2827         } else
 2828                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2829 }
 2830 
 2831 static int
 2832 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2833 {
 2834         struct xpt_traverse_config *tr_config;
 2835         xpt_periphfunc_t *tr_func;
 2836 
 2837         tr_config = (struct xpt_traverse_config *)arg;
 2838 
 2839         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2840 
 2841         /*
 2842          * Unlike the other default functions, we don't check for depth
 2843          * here.  The peripheral driver level is the last level in the EDT,
 2844          * so if we're here, we should execute the function in question.
 2845          */
 2846         return(tr_func(periph, tr_config->tr_arg));
 2847 }
 2848 
 2849 /*
 2850  * Execute the given function for every bus in the EDT.
 2851  */
 2852 static int
 2853 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2854 {
 2855         struct xpt_traverse_config tr_config;
 2856 
 2857         tr_config.depth = XPT_DEPTH_BUS;
 2858         tr_config.tr_func = tr_func;
 2859         tr_config.tr_arg = arg;
 2860 
 2861         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2862 }
 2863 
 2864 /*
 2865  * Execute the given function for every device in the EDT.
 2866  */
 2867 static int
 2868 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2869 {
 2870         struct xpt_traverse_config tr_config;
 2871 
 2872         tr_config.depth = XPT_DEPTH_DEVICE;
 2873         tr_config.tr_func = tr_func;
 2874         tr_config.tr_arg = arg;
 2875 
 2876         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2877 }
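/*
 * Illustrative sketch (hypothetical example_* names): the wrappers above
 * let a caller visit every node at one depth with a single callback, e.g.
 * counting all configured devices:
 */
#if 0
static int
example_count_dev(struct cam_ed *device, void *arg)
{
        if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
                (*(int *)arg)++;
        return (1);
}

static int
example_count_devices(void)
{
        int ndevs = 0;

        (void)xpt_for_all_devices(example_count_dev, &ndevs);
        return (ndevs);
}
#endif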
 2878 
 2879 static int
 2880 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2881 {
 2882         struct cam_path path;
 2883         struct ccb_getdev cgd;
 2884         struct async_node *cur_entry;
 2885 
 2886         cur_entry = (struct async_node *)arg;
 2887 
 2888         /*
 2889          * Don't report unconfigured devices (Wildcard devs,
 2890          * devices only for target mode, device instances
 2891          * that have been invalidated but are waiting for
 2892          * their last reference count to be released).
 2893                  * their last reference to be released).
 2894         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2895                 return (1);
 2896 
 2897         xpt_compile_path(&path,
 2898                          NULL,
 2899                          device->target->bus->path_id,
 2900                          device->target->target_id,
 2901                          device->lun_id);
 2902         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
 2903         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2904         xpt_action((union ccb *)&cgd);
 2905         cur_entry->callback(cur_entry->callback_arg,
 2906                             AC_FOUND_DEVICE,
 2907                             &path, &cgd);
 2908         xpt_release_path(&path);
 2909 
 2910         return(1);
 2911 }
 2912 
 2913 static int
 2914 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2915 {
 2916         struct cam_path path;
 2917         struct ccb_pathinq cpi;
 2918         struct async_node *cur_entry;
 2919 
 2920         cur_entry = (struct async_node *)arg;
 2921 
 2922         xpt_compile_path(&path, /*periph*/NULL,
 2923                          bus->sim->path_id,
 2924                          CAM_TARGET_WILDCARD,
 2925                          CAM_LUN_WILDCARD);
 2926         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 2927         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2928         xpt_action((union ccb *)&cpi);
 2929         cur_entry->callback(cur_entry->callback_arg,
 2930                             AC_PATH_REGISTERED,
 2931                             &path, &cpi);
 2932         xpt_release_path(&path);
 2933 
 2934         return(1);
 2935 }
 2936 
 2937 static void
 2938 xpt_action_sasync_cb(void *context, int pending)
 2939 {
 2940         struct async_node *cur_entry;
 2941         struct xpt_task *task;
 2942         uint32_t added;
 2943 
 2944         task = (struct xpt_task *)context;
 2945         cur_entry = (struct async_node *)task->data1;
 2946         added = task->data2;
 2947 
 2948         if ((added & AC_FOUND_DEVICE) != 0) {
 2949                 /*
 2950                  * Get this peripheral up to date with all
 2951                  * the currently existing devices.
 2952                  */
 2953                 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
 2954         }
 2955         if ((added & AC_PATH_REGISTERED) != 0) {
 2956                 /*
 2957                  * Get this peripheral up to date with all
 2958                  * the currently existing busses.
 2959                  */
 2960                 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
 2961         }
 2962 
 2963         free(task, M_CAMXPT);
 2964 }
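/*
 * Illustrative sketch (hypothetical example_* names) of how a client
 * registers for async events, roughly as peripheral drivers do from their
 * init routines.  Because AC_FOUND_DEVICE is enabled here, the
 * XPT_SASYNC_CB handler below will queue xpt_action_sasync_cb() to replay
 * all existing devices to the new callback.
 */
#if 0
static void
example_async(void *callback_arg, u_int32_t code,
              struct cam_path *path, void *arg)
{
        /* code is one of AC_FOUND_DEVICE, AC_PATH_REGISTERED, ... */
}

static void
example_register_async(void)
{
        struct ccb_setasync csa;
        struct cam_path *path;

        if (xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
                return;
        xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = AC_FOUND_DEVICE;
        csa.callback = example_async;
        csa.callback_arg = NULL;
        xpt_action((union ccb *)&csa);
        xpt_free_path(path);
}
#endif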
 2965 
 2966 void
 2967 xpt_action(union ccb *start_ccb)
 2968 {
 2969 
 2970         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2971 
 2972         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2973 
 2974         switch (start_ccb->ccb_h.func_code) {
 2975         case XPT_SCSI_IO:
 2976         {
 2977                 struct cam_ed *device;
 2978 #ifdef CAMDEBUG
 2979                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 2980                 struct cam_path *path;
 2981 
 2982                 path = start_ccb->ccb_h.path;
 2983 #endif
 2984 
 2985                 /*
 2986                  * For the sake of compatibility with SCSI-1
 2987                  * devices that may not understand the identify
 2988                  * message, we include lun information in the
 2989                  * second byte of all commands.  SCSI-1 specifies
 2990                  * that luns are a 3 bit value and reserves only 3
 2991                  * bits for lun information in the CDB.  Later
 2992                  * revisions of the SCSI spec allow for more than 8
 2993                  * luns, but have deprecated lun information in the
 2994                  * CDB.  So, if the lun won't fit, we must omit it.
 2995                  *
 2996                  * Also be aware that during initial probing for devices,
 2997                  * the inquiry information is unknown but initialized to 0.
 2998                  * This means that this code will be exercised while probing
 2999                  * devices with an ANSI revision greater than 2.
 3000                  */
 3001                 device = start_ccb->ccb_h.path->device;
 3002                 if (device->protocol_version <= SCSI_REV_2
 3003                  && start_ccb->ccb_h.target_lun < 8
 3004                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 3005 
 3006                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 3007                             start_ccb->ccb_h.target_lun << 5;
 3008                 }
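                /*
                 * Worked example: for target_lun 2 on a SCSI-1 device,
                 * (2 << 5) == 0x40 is OR'ed into byte 1 of the CDB,
                 * placing the lun in bits 7-5 as SCSI-1 requires.
                 */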
 3009                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 3010                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
 3011                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
 3012                                        &path->device->inq_data),
 3013                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
 3014                                           cdb_str, sizeof(cdb_str))));
 3015         }
 3016         /* FALLTHROUGH */
 3017         case XPT_TARGET_IO:
 3018         case XPT_CONT_TARGET_IO:
 3019                 start_ccb->csio.sense_resid = 0;
 3020                 start_ccb->csio.resid = 0;
 3021                 /* FALLTHROUGH */
 3022         case XPT_RESET_DEV:
 3023         case XPT_ENG_EXEC:
 3024         {
 3025                 struct cam_path *path;
 3026                 struct cam_sim *sim;
 3027                 int runq;
 3028 
 3029                 path = start_ccb->ccb_h.path;
 3030 
 3031                 sim = path->bus->sim;
 3032                 if (SIM_DEAD(sim)) {
 3033                         /* The SIM has gone; just execute the CCB directly. */
 3034                         cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
 3035                         (*(sim->sim_action))(sim, start_ccb);
 3036                         break;
 3037                 }
 3038 
 3039                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 3040                 if (path->device->qfrozen_cnt == 0)
 3041                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
 3042                 else
 3043                         runq = 0;
 3044                 if (runq != 0)
 3045                         xpt_run_dev_sendq(path->bus);
 3046                 break;
 3047         }
 3048         case XPT_SET_TRAN_SETTINGS:
 3049         {
 3050                 xpt_set_transfer_settings(&start_ccb->cts,
 3051                                           start_ccb->ccb_h.path->device,
 3052                                           /*async_update*/FALSE);
 3053                 break;
 3054         }
 3055         case XPT_CALC_GEOMETRY:
 3056         {
 3057                 struct cam_sim *sim;
 3058 
 3059                 /* Filter out garbage */
 3060                 if (start_ccb->ccg.block_size == 0
 3061                  || start_ccb->ccg.volume_size == 0) {
 3062                         start_ccb->ccg.cylinders = 0;
 3063                         start_ccb->ccg.heads = 0;
 3064                         start_ccb->ccg.secs_per_track = 0;
 3065                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3066                         break;
 3067                 }
 3068 #ifdef PC98
 3069                 /*
 3070                  * In a PC-98 system, geometry translation depends on
 3071                  * the "real" device geometry obtained from mode page 4.
 3072                  * SCSI geometry translation is performed in the
 3073                  * initialization routine of the SCSI BIOS and the result
 3074                  * stored in host memory.  If the translation is available
 3075                  * in host memory, use it.  If not, rely on the default
 3076                  * translation the device driver performs.
 3077                  */
 3078                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 3079                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3080                         break;
 3081                 }
 3082 #endif
 3083                 sim = start_ccb->ccb_h.path->bus->sim;
 3084                 (*(sim->sim_action))(sim, start_ccb);
 3085                 break;
 3086         }
 3087         case XPT_ABORT:
 3088         {
 3089                 union ccb *abort_ccb;
 3090 
 3091                 abort_ccb = start_ccb->cab.abort_ccb;
 3092                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 3093 
 3094                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 3095                                 struct cam_ccbq *ccbq;
 3096 
 3097                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
 3098                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
 3099                                 abort_ccb->ccb_h.status =
 3100                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 3101                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 3102                                 xpt_done(abort_ccb);
 3103                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3104                                 break;
 3105                         }
 3106                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 3107                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 3108                                 /*
 3109                                  * We've caught this ccb en route to
 3110                                  * the SIM.  Flag it for abort and the
 3111                                  * SIM will do so just before starting
 3112                                  * real work on the CCB.
 3113                                  */
 3114                                 abort_ccb->ccb_h.status =
 3115                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 3116                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 3117                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3118                                 break;
 3119                         }
 3120                 }
 3121                 if (XPT_FC_IS_QUEUED(abort_ccb)
 3122                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 3123                         /*
 3124                          * It's already completed but waiting
 3125                          * for our SWI to get to it.
 3126                          */
 3127                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 3128                         break;
 3129                 }
 3130                 /*
 3131                  * If we weren't able to take care of the abort request
 3132                  * in the XPT, pass the request down to the SIM for processing.
 3133                  */
 3134         }
 3135         /* FALLTHROUGH */
 3136         case XPT_ACCEPT_TARGET_IO:
 3137         case XPT_EN_LUN:
 3138         case XPT_IMMED_NOTIFY:
 3139         case XPT_NOTIFY_ACK:
 3140         case XPT_GET_TRAN_SETTINGS:
 3141         case XPT_RESET_BUS:
 3142         {
 3143                 struct cam_sim *sim;
 3144 
 3145                 sim = start_ccb->ccb_h.path->bus->sim;
 3146                 (*(sim->sim_action))(sim, start_ccb);
 3147                 break;
 3148         }
 3149         case XPT_PATH_INQ:
 3150         {
 3151                 struct cam_sim *sim;
 3152 
 3153                 sim = start_ccb->ccb_h.path->bus->sim;
 3154                 (*(sim->sim_action))(sim, start_ccb);
 3155                 break;
 3156         }
 3157         case XPT_PATH_STATS:
 3158                 start_ccb->cpis.last_reset =
 3159                         start_ccb->ccb_h.path->bus->last_reset;
 3160                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3161                 break;
 3162         case XPT_GDEV_TYPE:
 3163         {
 3164                 struct cam_ed *dev;
 3165 
 3166                 dev = start_ccb->ccb_h.path->device;
 3167                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3168                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3169                 } else {
 3170                         struct ccb_getdev *cgd;
 3171                         struct cam_eb *bus;
 3172                         struct cam_et *tar;
 3173 
 3174                         cgd = &start_ccb->cgd;
 3175                         bus = cgd->ccb_h.path->bus;
 3176                         tar = cgd->ccb_h.path->target;
 3177                         cgd->inq_data = dev->inq_data;
 3178                         cgd->ccb_h.status = CAM_REQ_CMP;
 3179                         cgd->serial_num_len = dev->serial_num_len;
 3180                         if ((dev->serial_num_len > 0)
 3181                          && (dev->serial_num != NULL))
 3182                                 bcopy(dev->serial_num, cgd->serial_num,
 3183                                       dev->serial_num_len);
 3184                 }
 3185                 break;
 3186         }
 3187         case XPT_GDEV_STATS:
 3188         {
 3189                 struct cam_ed *dev;
 3190 
 3191                 dev = start_ccb->ccb_h.path->device;
 3192                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3193                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3194                 } else {
 3195                         struct ccb_getdevstats *cgds;
 3196                         struct cam_eb *bus;
 3197                         struct cam_et *tar;
 3198 
 3199                         cgds = &start_ccb->cgds;
 3200                         bus = cgds->ccb_h.path->bus;
 3201                         tar = cgds->ccb_h.path->target;
 3202                         cgds->dev_openings = dev->ccbq.dev_openings;
 3203                         cgds->dev_active = dev->ccbq.dev_active;
 3204                         cgds->devq_openings = dev->ccbq.devq_openings;
 3205                         cgds->devq_queued = dev->ccbq.queue.entries;
 3206                         cgds->held = dev->ccbq.held;
 3207                         cgds->last_reset = tar->last_reset;
 3208                         cgds->maxtags = dev->quirk->maxtags;
 3209                         cgds->mintags = dev->quirk->mintags;
 3210                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 3211                                 cgds->last_reset = bus->last_reset;
 3212                         cgds->ccb_h.status = CAM_REQ_CMP;
 3213                 }
 3214                 break;
 3215         }
 3216         case XPT_GDEVLIST:
 3217         {
 3218                 struct cam_periph       *nperiph;
 3219                 struct periph_list      *periph_head;
 3220                 struct ccb_getdevlist   *cgdl;
 3221                 u_int                   i;
 3222                 struct cam_ed           *device;
 3223                 int                     found;
 3224 
 3225 
 3226                 found = 0;
 3227 
 3228                 /*
 3229                  * Don't want anyone mucking with our data.
 3230                  */
 3231                 device = start_ccb->ccb_h.path->device;
 3232                 periph_head = &device->periphs;
 3233                 cgdl = &start_ccb->cgdl;
 3234 
 3235                 /*
 3236                  * Check and see if the list has changed since the user
 3237                  * last requested a list member.  If so, tell them that the
 3238                  * list has changed, and therefore they need to start over
 3239                  * from the beginning.
 3240                  */
 3241                 if ((cgdl->index != 0) &&
 3242                     (cgdl->generation != device->generation)) {
 3243                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 3244                         break;
 3245                 }
 3246 
 3247                 /*
 3248                  * Traverse the list of peripherals and attempt to find
 3249                  * the requested peripheral.
 3250                  */
 3251                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 3252                      (nperiph != NULL) && (i <= cgdl->index);
 3253                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 3254                         if (i == cgdl->index) {
 3255                                 strncpy(cgdl->periph_name,
 3256                                         nperiph->periph_name,
 3257                                         DEV_IDLEN);
 3258                                 cgdl->unit_number = nperiph->unit_number;
 3259                                 found = 1;
 3260                         }
 3261                 }
 3262                 if (found == 0) {
 3263                         cgdl->status = CAM_GDEVLIST_ERROR;
 3264                         break;
 3265                 }
 3266 
 3267                 if (nperiph == NULL)
 3268                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 3269                 else
 3270                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 3271 
 3272                 cgdl->index++;
 3273                 cgdl->generation = device->generation;
 3274 
 3275                 cgdl->ccb_h.status = CAM_REQ_CMP;
 3276                 break;
 3277         }
 3278         case XPT_DEV_MATCH:
 3279         {
 3280                 dev_pos_type position_type;
 3281                 struct ccb_dev_match *cdm;
 3282 
 3283                 cdm = &start_ccb->cdm;
 3284 
 3285                 /*
 3286                  * There are two ways of getting at information in the EDT.
 3287                  * The first way is via the primary EDT tree.  It starts
 3288                  * with a list of busses, then a list of targets on a bus,
 3289                  * then devices/luns on a target, and then peripherals on a
 3290                  * device/lun.  The "other" way is by the peripheral driver
 3291                  * lists.  The peripheral driver lists are (obviously)
 3292                  * organized by peripheral driver.  So it makes sense to
 3293                  * use the peripheral driver list if the user is looking
 3294                  * for something like "da1", or all "da" devices.  If the
 3295                  * user is looking for something on a particular bus/target
 3296                  * or lun, it's generally better to go through the EDT tree.
 3297                  */
 3298 
 3299                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 3300                         position_type = cdm->pos.position_type;
 3301                 else {
 3302                         u_int i;
 3303 
 3304                         position_type = CAM_DEV_POS_NONE;
 3305 
 3306                         for (i = 0; i < cdm->num_patterns; i++) {
 3307                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 3308                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 3309                                         position_type = CAM_DEV_POS_EDT;
 3310                                         break;
 3311                                 }
 3312                         }
 3313 
 3314                         if (cdm->num_patterns == 0)
 3315                                 position_type = CAM_DEV_POS_EDT;
 3316                         else if (position_type == CAM_DEV_POS_NONE)
 3317                                 position_type = CAM_DEV_POS_PDRV;
 3318                 }
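                /*
                 * For example, a request whose only pattern is of type
                 * DEV_MATCH_PERIPH (say, all "da" units) falls through
                 * to CAM_DEV_POS_PDRV here, while any DEV_MATCH_BUS or
                 * DEV_MATCH_DEVICE pattern selects the EDT walk.
                 */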
 3319 
 3320                 switch (position_type & CAM_DEV_POS_TYPEMASK) {
 3321                 case CAM_DEV_POS_EDT:
 3322                         xptedtmatch(cdm);
 3323                         break;
 3324                 case CAM_DEV_POS_PDRV:
 3325                         xptperiphlistmatch(cdm);
 3326                         break;
 3327                 default:
 3328                         cdm->status = CAM_DEV_MATCH_ERROR;
 3329                         break;
 3330                 }
 3331 
 3332                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 3333                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 3334                 else
 3335                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3336 
 3337                 break;
 3338         }
 3339         case XPT_SASYNC_CB:
 3340         {
 3341                 struct ccb_setasync *csa;
 3342                 struct async_node *cur_entry;
 3343                 struct async_list *async_head;
 3344                 u_int32_t added;
 3345 
 3346                 csa = &start_ccb->csa;
 3347                 added = csa->event_enable;
 3348                 async_head = &csa->ccb_h.path->device->asyncs;
 3349 
 3350                 /*
 3351                  * If there is already an entry for us, simply
 3352                  * update it.
 3353                  */
 3354                 cur_entry = SLIST_FIRST(async_head);
 3355                 while (cur_entry != NULL) {
 3356                         if ((cur_entry->callback_arg == csa->callback_arg)
 3357                          && (cur_entry->callback == csa->callback))
 3358                                 break;
 3359                         cur_entry = SLIST_NEXT(cur_entry, links);
 3360                 }
 3361 
 3362                 if (cur_entry != NULL) {
 3363                         /*
 3364                          * If the request has no flags set,
 3365                          * remove the entry.
 3366                          */
 3367                         added &= ~cur_entry->event_enable;
 3368                         if (csa->event_enable == 0) {
 3369                                 SLIST_REMOVE(async_head, cur_entry,
 3370                                              async_node, links);
 3371                                 csa->ccb_h.path->device->refcount--;
 3372                                 free(cur_entry, M_CAMXPT);
 3373                         } else {
 3374                                 cur_entry->event_enable = csa->event_enable;
 3375                         }
 3376                 } else {
 3377                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 3378                                            M_NOWAIT);
 3379                         if (cur_entry == NULL) {
 3380                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 3381                                 break;
 3382                         }
 3383                         cur_entry->event_enable = csa->event_enable;
 3384                         cur_entry->callback_arg = csa->callback_arg;
 3385                         cur_entry->callback = csa->callback;
 3386                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 3387                         csa->ccb_h.path->device->refcount++;
 3388                 }
 3389 
 3390                 /*
 3391                  * Need to decouple this operation via a taskqueue so that
 3392                  * the locking doesn't become a mess.
 3393                  */
 3394                 if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) {
 3395                         struct xpt_task *task;
 3396 
 3397                         task = malloc(sizeof(struct xpt_task), M_CAMXPT,
 3398                                       M_NOWAIT);
 3399                         if (task == NULL) {
 3400                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 3401                                 break;
 3402                         }
 3403 
 3404                         TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task);
 3405                         task->data1 = cur_entry;
 3406                         task->data2 = added;
 3407                         taskqueue_enqueue(taskqueue_thread, &task->task);
 3408                 }
 3409 
 3410                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3411                 break;
 3412         }
 3413         case XPT_REL_SIMQ:
 3414         {
 3415                 struct ccb_relsim *crs;
 3416                 struct cam_ed *dev;
 3417 
 3418                 crs = &start_ccb->crs;
 3419                 dev = crs->ccb_h.path->device;
 3420                 if (dev == NULL) {
 3421 
 3422                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 3423                         break;
 3424                 }
 3425 
 3426                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 3427 
 3428                         if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
 3429                                 /* Don't ever go below one opening */
 3430                                 if (crs->openings > 0) {
 3431                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
 3432                                                             crs->openings);
 3433 
 3434                                         if (bootverbose) {
 3435                                                 xpt_print(crs->ccb_h.path,
 3436                                                     "tagged openings now %d\n",
 3437                                                     crs->openings);
 3438                                         }
 3439                                 }
 3440                         }
 3441                 }
 3442 
 3443                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 3444 
 3445                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 3446 
 3447                                 /*
 3448                                  * Just extend the old timeout and decrement
 3449                                  * the freeze count so that a single timeout
 3450                                  * is sufficient for releasing the queue.
 3451                                  */
 3452                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3453                                 callout_stop(&dev->callout);
 3454                         } else {
 3455 
 3456                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3457                         }
 3458 
 3459                         callout_reset(&dev->callout,
 3460                             (crs->release_timeout * hz) / 1000,
 3461                             xpt_release_devq_timeout, dev);
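                        /*
                         * Note: release_timeout is in milliseconds; the
                         * expression above converts it to callout ticks
                         * (e.g. 5000ms becomes 5 * hz ticks).
                         */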
 3462 
 3463                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 3464 
 3465                 }
 3466 
 3467                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 3468 
 3469                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 3470                                 /*
 3471                                  * Decrement the freeze count so that a single
 3472                                  * completion is still sufficient to unfreeze
 3473                                  * the queue.
 3474                                  */
 3475                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3476                         } else {
 3477 
 3478                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 3479                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3480                         }
 3481                 }
 3482 
 3483                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 3484 
 3485                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 3486                          || (dev->ccbq.dev_active == 0)) {
 3487 
 3488                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3489                         } else {
 3490 
 3491                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 3492                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3493                         }
 3494                 }
 3495 
 3496                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 3497 
 3498                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
 3499                                          /*run_queue*/TRUE);
 3500                 }
 3501                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
 3502                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3503                 break;
 3504         }
 3505         case XPT_SCAN_BUS:
 3506                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
 3507                 break;
 3508         case XPT_SCAN_LUN:
 3509                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
 3510                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
 3511                              start_ccb);
 3512                 break;
 3513         case XPT_DEBUG: {
 3514 #ifdef CAMDEBUG
 3515 #ifdef CAM_DEBUG_DELAY
 3516                 cam_debug_delay = CAM_DEBUG_DELAY;
 3517 #endif
 3518                 cam_dflags = start_ccb->cdbg.flags;
 3519                 if (cam_dpath != NULL) {
 3520                         xpt_free_path(cam_dpath);
 3521                         cam_dpath = NULL;
 3522                 }
 3523 
 3524                 if (cam_dflags != CAM_DEBUG_NONE) {
 3525                         if (xpt_create_path(&cam_dpath, xpt_periph,
 3526                                             start_ccb->ccb_h.path_id,
 3527                                             start_ccb->ccb_h.target_id,
 3528                                             start_ccb->ccb_h.target_lun) !=
 3529                                             CAM_REQ_CMP) {
 3530                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3531                                 cam_dflags = CAM_DEBUG_NONE;
 3532                         } else {
 3533                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3534                                 xpt_print(cam_dpath, "debugging flags now %x\n",
 3535                                     cam_dflags);
 3536                         }
 3537                 } else {
 3538                         cam_dpath = NULL;
 3539                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3540                 }
 3541 #else /* !CAMDEBUG */
 3542                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 3543 #endif /* CAMDEBUG */
 3544                 break;
 3545         }
 3546         case XPT_NOOP:
 3547                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 3548                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
 3549                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3550                 break;
 3551         default:
 3552         case XPT_SDEV_TYPE:
 3553         case XPT_TERM_IO:
 3554         case XPT_ENG_INQ:
 3555                 /* XXX Implement */
 3556                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 3557                 break;
 3558         }
 3559 }
 3560 
 3561 void
 3562 xpt_polled_action(union ccb *start_ccb)
 3563 {
 3564         u_int32_t timeout;
 3565         struct    cam_sim *sim;
 3566         struct    cam_devq *devq;
 3567         struct    cam_ed *dev;
 3568 
 3569 
 3570         timeout = start_ccb->ccb_h.timeout;
 3571         sim = start_ccb->ccb_h.path->bus->sim;
 3572         devq = sim->devq;
 3573         dev = start_ccb->ccb_h.path->device;
 3574 
 3575         mtx_assert(sim->mtx, MA_OWNED);
 3576 
 3577         /*
 3578          * Steal an opening so that no other queued requests
 3579          * can get it before us while we simulate interrupts.
 3580          */
 3581         dev->ccbq.devq_openings--;
 3582         dev->ccbq.dev_openings--;
 3583 
 3584         while (((devq != NULL && devq->send_openings <= 0) ||
 3585            dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
 3586                 DELAY(1000);
 3587                 (*(sim->sim_poll))(sim);
 3588                 camisr_runqueue(&sim->sim_doneq);
 3589         }
 3590 
 3591         dev->ccbq.devq_openings++;
 3592         dev->ccbq.dev_openings++;
 3593 
 3594         if (timeout != 0) {
 3595                 xpt_action(start_ccb);
 3596                 while (--timeout > 0) {
 3597                         (*(sim->sim_poll))(sim);
 3598                         camisr_runqueue(&sim->sim_doneq);
 3599                         if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
 3600                             != CAM_REQ_INPROG)
 3601                                 break;
 3602                         DELAY(1000);
 3603                 }
 3604                 if (timeout == 0) {
 3605                         /*
 3606                          * XXX Is it worth adding a sim_timeout entry
 3607                          * point so we can attempt recovery?  If
 3608                          * this is only used for dumps, I don't think
 3609                          * it is.
 3610                          */
 3611                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3612                 }
 3613         } else {
 3614                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3615         }
 3616 }
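/*
 * Illustrative sketch (hypothetical example_* names): xpt_polled_action()
 * exists for contexts where interrupts are unavailable, such as crash
 * dumps or shutdown.  A caller builds a CCB as usual and polls it to
 * completion, along the lines of what da(4) does when flushing its cache:
 */
#if 0
static void example_done(struct cam_periph *periph, union ccb *done_ccb);

static void
example_polled_sync_cache(struct cam_periph *periph)
{
        union ccb ccb;

        xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
        scsi_synchronize_cache(&ccb.csio, /*retries*/1,
            /*cbfcnp*/example_done, MSG_SIMPLE_Q_TAG,
            /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
            /*timeout*/60 * 1000);
        xpt_polled_action(&ccb);
}
#endif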
 3617 
 3618 /*
 3619  * Schedule a peripheral driver to receive a ccb when its
 3620  * target device has space for more transactions.
 3621  */
 3622 void
 3623 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 3624 {
 3625         struct cam_ed *device;
 3626         union ccb *work_ccb;
 3627         int runq;
 3628 
 3629         mtx_assert(perph->sim->mtx, MA_OWNED);
 3630 
 3631         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3632         device = perph->path->device;
 3633         if (periph_is_queued(perph)) {
 3634                 /* Simply reorder based on new priority */
 3635                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3636                           ("   change priority to %d\n", new_priority));
 3637                 if (new_priority < perph->pinfo.priority) {
 3638                         camq_change_priority(&device->drvq,
 3639                                              perph->pinfo.index,
 3640                                              new_priority);
 3641                 }
 3642                 runq = 0;
 3643         } else if (SIM_DEAD(perph->path->bus->sim)) {
 3644                 /* The SIM is gone so just call periph_start directly. */
 3645                 work_ccb = xpt_get_ccb(perph->path->device);
 3646                 if (work_ccb == NULL)
 3647                         return; /* XXX */
 3648                 xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
 3649                 perph->pinfo.priority = new_priority;
 3650                 perph->periph_start(perph, work_ccb);
 3651                 return;
 3652         } else {
 3653                 /* New entry on the queue */
 3654                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3655                           ("   added periph to queue\n"));
 3656                 perph->pinfo.priority = new_priority;
 3657                 perph->pinfo.generation = ++device->drvq.generation;
 3658                 camq_insert(&device->drvq, &perph->pinfo);
 3659                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3660         }
 3661         if (runq != 0) {
 3662                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3663                           ("   calling xpt_run_devq\n"));
 3664                 xpt_run_dev_allocq(perph->path->bus);
 3665         }
 3666 }
 3667 
 3668 
 3669 /*
 3670  * Schedule a device to run on a given queue.
 3671  * If the device was inserted as a new entry on the queue,
 3672  * return 1 meaning the device queue should be run. If we
 3673  * were already queued, implying someone else has already
 3674  * started the queue, return 0 so the caller doesn't attempt
 3675  * to run the queue.
 3676  */
 3677 static int
 3678 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3679                  u_int32_t new_priority)
 3680 {
 3681         int retval;
 3682         u_int32_t old_priority;
 3683 
 3684         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3685 
 3686         old_priority = pinfo->priority;
 3687 
 3688         /*
 3689          * Are we already queued?
 3690          */
 3691         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3692                 /* Simply reorder based on new priority */
 3693                 if (new_priority < old_priority) {
 3694                         camq_change_priority(queue, pinfo->index,
 3695                                              new_priority);
 3696                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3697                                         ("changed priority to %d\n",
 3698                                          new_priority));
 3699                 }
 3700                 retval = 0;
 3701         } else {
 3702                 /* New entry on the queue */
 3703                 if (new_priority < old_priority)
 3704                         pinfo->priority = new_priority;
 3705 
 3706                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3707                                 ("Inserting onto queue\n"));
 3708                 pinfo->generation = ++queue->generation;
 3709                 camq_insert(queue, pinfo);
 3710                 retval = 1;
 3711         }
 3712         return (retval);
 3713 }
 3714 
 3715 static void
 3716 xpt_run_dev_allocq(struct cam_eb *bus)
 3717 {
 3718         struct  cam_devq *devq;
 3719 
 3720         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3721         devq = bus->sim->devq;
 3722 
 3723         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3724                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3725                          "openings == %d, active == %d\n",
 3726                          devq->alloc_queue.qfrozen_cnt,
 3727                          devq->alloc_queue.entries,
 3728                          devq->alloc_openings,
 3729                          devq->alloc_active));
 3730 
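        /*
         * The temporary freeze below keeps a re-entrant call (e.g. from
         * a periph_start routine that queues more work) from draining
         * the queue too: the loop only runs while this freeze is the
         * only one outstanding.
         */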
 3731         devq->alloc_queue.qfrozen_cnt++;
 3732         while ((devq->alloc_queue.entries > 0)
 3733             && (devq->alloc_openings > 0)
 3734             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
 3735                 struct  cam_ed_qinfo *qinfo;
 3736                 struct  cam_ed *device;
 3737                 union   ccb *work_ccb;
 3738                 struct  cam_periph *drv;
 3739                 struct  camq *drvq;
 3740 
 3741                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3742                                                            CAMQ_HEAD);
 3743                 device = qinfo->device;
 3744 
 3745                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3746                                 ("running device %p\n", device));
 3747 
 3748                 drvq = &device->drvq;
 3749 
 3750 #ifdef CAMDEBUG
 3751                 if (drvq->entries <= 0) {
 3752                         panic("xpt_run_dev_allocq: "
 3753                               "Device on queue without any work to do");
 3754                 }
 3755 #endif
 3756                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3757                         devq->alloc_openings--;
 3758                         devq->alloc_active++;
 3759                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3760                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3761                                       drv->pinfo.priority);
 3762                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3763                                         ("calling periph start\n"));
 3764                         drv->periph_start(drv, work_ccb);
 3765                 } else {
 3766                         /*
 3767                          * Malloc failure in alloc_ccb
 3768                          */
 3769                         /*
 3770                          * XXX add us to a list to be run from free_ccb
 3771                          * if we don't have any ccbs active on this
 3772                          * device queue otherwise we may never get run
 3773                          * again.
 3774                          */
 3775                         break;
 3776                 }
 3777 
 3778                 if (drvq->entries > 0) {
 3779                         /* We have more work.  Attempt to reschedule */
 3780                         xpt_schedule_dev_allocq(bus, device);
 3781                 }
 3782         }
 3783         devq->alloc_queue.qfrozen_cnt--;
 3784 }
 3785 
 3786 static void
 3787 xpt_run_dev_sendq(struct cam_eb *bus)
 3788 {
 3789         struct  cam_devq *devq;
 3790 
 3791         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3792 
 3793         devq = bus->sim->devq;
 3794 
 3795         devq->send_queue.qfrozen_cnt++;
 3796         while ((devq->send_queue.entries > 0)
 3797             && (devq->send_openings > 0)) {
 3798                 struct  cam_ed_qinfo *qinfo;
 3799                 struct  cam_ed *device;
 3800                 union ccb *work_ccb;
 3801                 struct  cam_sim *sim;
 3802 
 3803                 if (devq->send_queue.qfrozen_cnt > 1) {
 3804                         break;
 3805                 }
 3806 
 3807                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3808                                                            CAMQ_HEAD);
 3809                 device = qinfo->device;
 3810 
 3811                 /*
 3812                  * If the device has been "frozen", don't attempt
 3813                  * to run it.
 3814                  */
 3815                 if (device->qfrozen_cnt > 0) {
 3816                         continue;
 3817                 }
 3818 
 3819                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3820                                 ("running device %p\n", device));
 3821 
 3822                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3823                 if (work_ccb == NULL) {
 3824                         printf("device on run queue with no ccbs???\n");
 3825                         continue;
 3826                 }
 3827 
 3828                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3829 
 3830                         mtx_lock(&xsoftc.xpt_lock);
 3831                         if (xsoftc.num_highpower <= 0) {
 3832                                 /*
 3833                                  * We got a high power command, but we
 3834                                  * don't have any available slots.  Freeze
 3835                                  * the device queue until we have a slot
 3836                                  * available.
 3837                                  */
 3838                                 device->qfrozen_cnt++;
 3839                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
 3840                                                    &work_ccb->ccb_h,
 3841                                                    xpt_links.stqe);
 3842 
 3843                                 mtx_unlock(&xsoftc.xpt_lock);
 3844                                 continue;
 3845                         } else {
 3846                                 /*
 3847                                  * Consume a high power slot while
 3848                                  * this ccb runs.
 3849                                  */
 3850                                 xsoftc.num_highpower--;
 3851                         }
 3852                         mtx_unlock(&xsoftc.xpt_lock);
 3853                 }
 3854                 devq->active_dev = device;
 3855                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3856 
 3857                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3858 
 3859                 devq->send_openings--;
 3860                 devq->send_active++;
 3861 
 3862                 if (device->ccbq.queue.entries > 0)
 3863                         xpt_schedule_dev_sendq(bus, device);
 3864 
 3865                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
 3866                         /*
 3867                          * The client wants to freeze the queue
 3868                          * after this CCB is sent.
 3869                          */
 3870                         device->qfrozen_cnt++;
 3871                 }
 3872 
 3873                 /* In Target mode, the peripheral driver knows best... */
 3874                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3875                         if ((device->inq_flags & SID_CmdQue) != 0
 3876                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3877                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3878                         else
 3879                                 /*
 3880                                  * Clear this in case of a retried CCB that
 3881                                  * failed due to a rejected tag.
 3882                                  */
 3883                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3884                 }
 3885 
 3886                 /*
 3887                  * Device queues can be shared among multiple sim instances
 3888                  * that reside on different busses.  Use the SIM in the queue
 3889                  * CCB's path, rather than the one in the bus that was passed
 3890                  * into this function.
 3891                  */
 3892                 sim = work_ccb->ccb_h.path->bus->sim;
 3893                 (*(sim->sim_action))(sim, work_ccb);
 3894 
 3895                 devq->active_dev = NULL;
 3896         }
 3897         devq->send_queue.qfrozen_cnt--;
 3898 }
 3899 
 3900 /*
 3901  * This function merges fields from the slave ccb into the master ccb, while
 3902  * keeping important fields in the master ccb constant.
 3903  */
 3904 void
 3905 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3906 {
 3907 
 3908         /*
 3909          * Pull fields that are valid for peripheral drivers to set
 3910          * into the master CCB along with the CCB "payload".
 3911          */
 3912         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3913         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3914         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3915         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 3916         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3917               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3918 }
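
#ifdef CAM_XPT_EXAMPLES /* hypothetical guard; these sketches are editorial */
/*
 * Editor's sketch, not part of the original file: one plausible use of
 * xpt_merge_ccb().  A replacement request is staged in a scratch CCB and
 * then grafted onto a CCB the XPT already tracks, so the header fields
 * that tie the master CCB into the XPT queues survive the copy.  The
 * function and variable names here are illustrative only.
 */
static void
example_merge_usage(union ccb *inflight_ccb)
{
        union ccb *scratch_ccb;

        scratch_ccb = xpt_alloc_ccb();
        xpt_setup_ccb(&scratch_ccb->ccb_h, inflight_ccb->ccb_h.path,
                      /*priority*/1);
        /* ... fill scratch_ccb in with the replacement request ... */

        /* Copy the payload; the master's queue linkage stays intact. */
        xpt_merge_ccb(inflight_ccb, scratch_ccb);
        xpt_free_ccb(scratch_ccb);
}
#endif /* CAM_XPT_EXAMPLES */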
 3919 
 3920 void
 3921 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3922 {
 3923 
 3924         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3925         ccb_h->pinfo.priority = priority;
 3926         ccb_h->path = path;
 3927         ccb_h->path_id = path->bus->path_id;
 3928         if (path->target)
 3929                 ccb_h->target_id = path->target->target_id;
 3930         else
 3931                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3932         if (path->device) {
 3933                 ccb_h->target_lun = path->device->lun_id;
 3934                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3935         } else {
 3936                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3937         }
 3938         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3939         ccb_h->flags = 0;
 3940 }
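
#ifdef CAM_XPT_EXAMPLES
/*
 * Editor's sketch, not part of the original file: the usual prepare-and-
 * dispatch sequence for an immediate CCB.  This mirrors the XPT_PATH_INQ
 * sequence that xpt_bus_register() performs later in this file.
 */
static void
example_path_inq(struct cam_path *path)
{
        struct ccb_pathinq cpi;

        xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
        cpi.ccb_h.func_code = XPT_PATH_INQ;
        xpt_action((union ccb *)&cpi);
        if (cpi.ccb_h.status == CAM_REQ_CMP)
                printf("bus supports target ids 0-%u\n", cpi.max_target);
}
#endif /* CAM_XPT_EXAMPLES */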
 3941 
 3942 /* Path manipulation functions */
 3943 cam_status
 3944 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3945                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3946 {
 3947         struct     cam_path *path;
 3948         cam_status status;
 3949 
 3950         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 3951 
 3952         if (path == NULL) {
 3953                 status = CAM_RESRC_UNAVAIL;
 3954                 return(status);
 3955         }
 3956         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3957         if (status != CAM_REQ_CMP) {
 3958                 free(path, M_CAMXPT);
 3959                 path = NULL;
 3960         }
 3961         *new_path_ptr = path;
 3962         return (status);
 3963 }
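
#ifdef CAM_XPT_EXAMPLES
/*
 * Editor's sketch, not part of the original file: the create/use/free
 * life cycle of a path.  A fully wildcarded target and LUN address every
 * device on the given bus; bus 0 here is arbitrary.
 */
static void
example_path_lifecycle(void)
{
        struct cam_path *path;

        if (xpt_create_path(&path, /*periph*/NULL, /*path_id*/0,
                            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
            != CAM_REQ_CMP)
                return;
        /* ... use the path with xpt_setup_ccb(), xpt_async(), etc. ... */
        xpt_free_path(path);
}
#endif /* CAM_XPT_EXAMPLES */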
 3964 
 3965 cam_status
 3966 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
 3967                          struct cam_periph *periph, path_id_t path_id,
 3968                          target_id_t target_id, lun_id_t lun_id)
 3969 {
 3970         struct     cam_path *path;
 3971         struct     cam_eb *bus = NULL;
 3972         cam_status status;
 3973         int        need_unlock = 0;
 3974 
 3975         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK);
 3976 
 3977         if (path_id != CAM_BUS_WILDCARD) {
 3978                 bus = xpt_find_bus(path_id);
 3979                 if (bus != NULL) {
 3980                         need_unlock = 1;
 3981                         CAM_SIM_LOCK(bus->sim);
 3982                 }
 3983         }
 3984         status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
 3985         if (need_unlock)
 3986                 CAM_SIM_UNLOCK(bus->sim);
 3987         if (status != CAM_REQ_CMP) {
 3988                 free(path, M_CAMXPT);
 3989                 path = NULL;
 3990         }
 3991         *new_path_ptr = path;
 3992         return (status);
 3993 }
 3994 
 3995 static cam_status
 3996 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 3997                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3998 {
 3999         struct       cam_eb *bus;
 4000         struct       cam_et *target;
 4001         struct       cam_ed *device;
 4002         cam_status   status;
 4003 
 4004         status = CAM_REQ_CMP;   /* Completed without error */
 4005         target = NULL;          /* Wildcarded */
 4006         device = NULL;          /* Wildcarded */
 4007 
 4008         /*
 4009          * We will potentially modify the EDT, so block interrupts
 4010          * that may attempt to create cam paths.
 4011          */
 4012         bus = xpt_find_bus(path_id);
 4013         if (bus == NULL) {
 4014                 status = CAM_PATH_INVALID;
 4015         } else {
 4016                 target = xpt_find_target(bus, target_id);
 4017                 if (target == NULL) {
 4018                         /* Create one */
 4019                         struct cam_et *new_target;
 4020 
 4021                         new_target = xpt_alloc_target(bus, target_id);
 4022                         if (new_target == NULL) {
 4023                                 status = CAM_RESRC_UNAVAIL;
 4024                         } else {
 4025                                 target = new_target;
 4026                         }
 4027                 }
 4028                 if (target != NULL) {
 4029                         device = xpt_find_device(target, lun_id);
 4030                         if (device == NULL) {
 4031                                 /* Create one */
 4032                                 struct cam_ed *new_device;
 4033 
 4034                                 new_device = xpt_alloc_device(bus,
 4035                                                               target,
 4036                                                               lun_id);
 4037                                 if (new_device == NULL) {
 4038                                         status = CAM_RESRC_UNAVAIL;
 4039                                 } else {
 4040                                         device = new_device;
 4041                                 }
 4042                         }
 4043                 }
 4044         }
 4045 
 4046         /*
 4047          * Only touch the user's data if we are successful.
 4048          */
 4049         if (status == CAM_REQ_CMP) {
 4050                 new_path->periph = perph;
 4051                 new_path->bus = bus;
 4052                 new_path->target = target;
 4053                 new_path->device = device;
 4054                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 4055         } else {
 4056                 if (device != NULL)
 4057                         xpt_release_device(bus, target, device);
 4058                 if (target != NULL)
 4059                         xpt_release_target(bus, target);
 4060                 if (bus != NULL)
 4061                         xpt_release_bus(bus);
 4062         }
 4063         return (status);
 4064 }
 4065 
 4066 static void
 4067 xpt_release_path(struct cam_path *path)
 4068 {
 4069         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 4070         if (path->device != NULL) {
 4071                 xpt_release_device(path->bus, path->target, path->device);
 4072                 path->device = NULL;
 4073         }
 4074         if (path->target != NULL) {
 4075                 xpt_release_target(path->bus, path->target);
 4076                 path->target = NULL;
 4077         }
 4078         if (path->bus != NULL) {
 4079                 xpt_release_bus(path->bus);
 4080                 path->bus = NULL;
 4081         }
 4082 }
 4083 
 4084 void
 4085 xpt_free_path(struct cam_path *path)
 4086 {
 4087 
 4088         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 4089         xpt_release_path(path);
 4090         free(path, M_CAMXPT);
 4091 }
 4092 
 4093 
 4094 /*
 4095  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 4096  * in path1, 2 for match with wildcards in path2.
 4097  */
 4098 int
 4099 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 4100 {
 4101         int retval = 0;
 4102 
 4103         if (path1->bus != path2->bus) {
 4104                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 4105                         retval = 1;
 4106                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 4107                         retval = 2;
 4108                 else
 4109                         return (-1);
 4110         }
 4111         if (path1->target != path2->target) {
 4112                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 4113                         if (retval == 0)
 4114                                 retval = 1;
 4115                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 4116                         retval = 2;
 4117                 else
 4118                         return (-1);
 4119         }
 4120         if (path1->device != path2->device) {
 4121                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 4122                         if (retval == 0)
 4123                                 retval = 1;
 4124                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 4125                         retval = 2;
 4126                 else
 4127                         return (-1);
 4128         }
 4129         return (retval);
 4130 }
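
#ifdef CAM_XPT_EXAMPLES
/*
 * Editor's sketch, not part of the original file: consumers of
 * xpt_path_comp() usually only care whether two paths overlap, so any
 * non-negative result counts as a match; 1 and 2 merely report which
 * argument supplied the wildcards.
 */
static int
example_paths_overlap(struct cam_path *p1, struct cam_path *p2)
{

        return (xpt_path_comp(p1, p2) >= 0);
}
#endif /* CAM_XPT_EXAMPLES */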
 4131 
 4132 void
 4133 xpt_print_path(struct cam_path *path)
 4134 {
 4135 
 4136         if (path == NULL)
 4137                 printf("(nopath): ");
 4138         else {
 4139                 if (path->periph != NULL)
 4140                         printf("(%s%d:", path->periph->periph_name,
 4141                                path->periph->unit_number);
 4142                 else
 4143                         printf("(noperiph:");
 4144 
 4145                 if (path->bus != NULL)
 4146                         printf("%s%d:%d:", path->bus->sim->sim_name,
 4147                                path->bus->sim->unit_number,
 4148                                path->bus->sim->bus_id);
 4149                 else
 4150                         printf("nobus:");
 4151 
 4152                 if (path->target != NULL)
 4153                         printf("%d:", path->target->target_id);
 4154                 else
 4155                         printf("X:");
 4156 
 4157                 if (path->device != NULL)
 4158                         printf("%d): ", path->device->lun_id);
 4159                 else
 4160                         printf("X): ");
 4161         }
 4162 }
 4163 
 4164 void
 4165 xpt_print(struct cam_path *path, const char *fmt, ...)
 4166 {
 4167         va_list ap;
 4168         xpt_print_path(path);
 4169         va_start(ap, fmt);
 4170         vprintf(fmt, ap);
 4171         va_end(ap);
 4172 }
 4173 
 4174 int
 4175 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 4176 {
 4177         struct sbuf sb;
 4178 
 4179         if (path != NULL)       /* don't dereference a NULL path */
 4180                 mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4181         sbuf_new(&sb, str, str_len, 0);
 4182 
 4183         if (path == NULL)
 4184                 sbuf_printf(&sb, "(nopath): ");
 4185         else {
 4186                 if (path->periph != NULL)
 4187                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 4188                                     path->periph->unit_number);
 4189                 else
 4190                         sbuf_printf(&sb, "(noperiph:");
 4191 
 4192                 if (path->bus != NULL)
 4193                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 4194                                     path->bus->sim->unit_number,
 4195                                     path->bus->sim->bus_id);
 4196                 else
 4197                         sbuf_printf(&sb, "nobus:");
 4198 
 4199                 if (path->target != NULL)
 4200                         sbuf_printf(&sb, "%d:", path->target->target_id);
 4201                 else
 4202                         sbuf_printf(&sb, "X:");
 4203 
 4204                 if (path->device != NULL)
 4205                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
 4206                 else
 4207                         sbuf_printf(&sb, "X): ");
 4208         }
 4209         sbuf_finish(&sb);
 4210 
 4211         return(sbuf_len(&sb));
 4212 }
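
#ifdef CAM_XPT_EXAMPLES
/*
 * Editor's sketch, not part of the original file: formatting a path into
 * a caller-supplied buffer.  The caller must hold the SIM lock, per the
 * mtx_assert() in xpt_path_string() above.
 */
static void
example_print_path_prefix(struct cam_path *path)
{
        char buf[64];

        xpt_path_string(path, buf, sizeof(buf));
        printf("%squeue state dumped\n", buf);
}
#endif /* CAM_XPT_EXAMPLES */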
 4213 
 4214 path_id_t
 4215 xpt_path_path_id(struct cam_path *path)
 4216 {
 4217         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4218 
 4219         return(path->bus->path_id);
 4220 }
 4221 
 4222 target_id_t
 4223 xpt_path_target_id(struct cam_path *path)
 4224 {
 4225         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4226 
 4227         if (path->target != NULL)
 4228                 return (path->target->target_id);
 4229         else
 4230                 return (CAM_TARGET_WILDCARD);
 4231 }
 4232 
 4233 lun_id_t
 4234 xpt_path_lun_id(struct cam_path *path)
 4235 {
 4236         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4237 
 4238         if (path->device != NULL)
 4239                 return (path->device->lun_id);
 4240         else
 4241                 return (CAM_LUN_WILDCARD);
 4242 }
 4243 
 4244 struct cam_sim *
 4245 xpt_path_sim(struct cam_path *path)
 4246 {
 4247 
 4248         return (path->bus->sim);
 4249 }
 4250 
 4251 struct cam_periph*
 4252 xpt_path_periph(struct cam_path *path)
 4253 {
 4254         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4255 
 4256         return (path->periph);
 4257 }
 4258 
 4259 /*
 4260  * Release a CAM control block for the caller.  Remit the cost of the structure
 4261  * to the device referenced by the path.  If this device had no 'credits'
 4262  * and peripheral drivers have registered async callbacks for this notification,
 4263  * call them now.
 4264  */
 4265 void
 4266 xpt_release_ccb(union ccb *free_ccb)
 4267 {
 4268         struct   cam_path *path;
 4269         struct   cam_ed *device;
 4270         struct   cam_eb *bus;
 4271         struct   cam_sim *sim;
 4272 
 4273         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 4274         path = free_ccb->ccb_h.path;
 4275         device = path->device;
 4276         bus = path->bus;
 4277         sim = bus->sim;
 4278 
 4279         mtx_assert(sim->mtx, MA_OWNED);
 4280 
 4281         cam_ccbq_release_opening(&device->ccbq);
 4282         if (sim->ccb_count > sim->max_ccbs) {
 4283                 xpt_free_ccb(free_ccb);
 4284                 sim->ccb_count--;
 4285         } else {
 4286                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
 4287                     xpt_links.sle);
 4288         }
 4289         if (sim->devq == NULL) {
 4290                 return;
 4291         }
 4292         sim->devq->alloc_openings++;
 4293         sim->devq->alloc_active--;
 4294         /* XXX Turn this into an inline function - xpt_run_device?? */
 4295         if ((device_is_alloc_queued(device) == 0)
 4296          && (device->drvq.entries > 0)) {
 4297                 xpt_schedule_dev_allocq(bus, device);
 4298         }
 4299         if (dev_allocq_is_runnable(sim->devq))
 4300                 xpt_run_dev_allocq(bus);
 4301 }
 4302 
 4303 /* Functions accessed by SIM drivers */
 4304 
 4305 /*
 4306  * A sim structure, listing the SIM entry points and instance
 4307  * identification info is passed to xpt_bus_register to hook the SIM
 4308  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 4309  * for this new bus and places it in the array of busses and assigns
 4310  * it a path_id.  The path_id may be influenced by "hard wiring"
 4311  * information specified by the user.  Once interrupt services are
 4312  * available, the bus will be probed.
 4313  */
 4314 int32_t
 4315 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
 4316 {
 4317         struct cam_eb *new_bus;
 4318         struct cam_eb *old_bus;
 4319         struct ccb_pathinq cpi;
 4320 
 4321         mtx_assert(sim->mtx, MA_OWNED);
 4322 
 4323         sim->bus_id = bus;
 4324         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 4325                                           M_CAMXPT, M_NOWAIT);
 4326         if (new_bus == NULL) {
 4327                 /* Couldn't satisfy request */
 4328                 return (CAM_RESRC_UNAVAIL);
 4329         }
 4330 
 4331         if (strcmp(sim->sim_name, "xpt") != 0) {
 4332 
 4333                 sim->path_id =
 4334                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 4335         }
 4336 
 4337         TAILQ_INIT(&new_bus->et_entries);
 4338         new_bus->path_id = sim->path_id;
 4339         new_bus->sim = sim;
 4340         timevalclear(&new_bus->last_reset);
 4341         new_bus->flags = 0;
 4342         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 4343         new_bus->generation = 0;
 4344         mtx_lock(&xsoftc.xpt_topo_lock);
 4345         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4346         while (old_bus != NULL
 4347             && old_bus->path_id < new_bus->path_id)
 4348                 old_bus = TAILQ_NEXT(old_bus, links);
 4349         if (old_bus != NULL)
 4350                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 4351         else
 4352                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
 4353         xsoftc.bus_generation++;
 4354         mtx_unlock(&xsoftc.xpt_topo_lock);
 4355 
 4356         /* Notify interested parties */
 4357         if (sim->path_id != CAM_XPT_PATH_ID) {
 4358                 struct cam_path path;
 4359 
 4360                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
 4361                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4362                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 4363                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 4364                 xpt_action((union ccb *)&cpi);
 4365                 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
 4366                 xpt_release_path(&path);
 4367         }
 4368         return (CAM_SUCCESS);
 4369 }
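
#ifdef CAM_XPT_EXAMPLES
/*
 * Editor's sketch, not part of the original file: the attach-time
 * sequence a SIM driver typically follows before calling
 * xpt_bus_register().  "mydrv_action", "mydrv_poll" and the softc are
 * hypothetical, and the cam_simq_alloc()/cam_sim_alloc() signatures are
 * assumed to match this era of the CAM API.
 */
static void     mydrv_action(struct cam_sim *sim, union ccb *ccb);
static void     mydrv_poll(struct cam_sim *sim);

static int
example_sim_attach(device_t dev, void *softc, struct mtx *lock)
{
        struct cam_devq *devq;
        struct cam_sim *sim;

        if ((devq = cam_simq_alloc(/*max_sim_transactions*/32)) == NULL)
                return (ENOMEM);
        sim = cam_sim_alloc(mydrv_action, mydrv_poll, "mydrv", softc,
                            device_get_unit(dev), lock,
                            /*max_dev_transactions*/1,
                            /*max_tagged_dev_transactions*/32, devq);
        if (sim == NULL) {
                cam_simq_free(devq);
                return (ENOMEM);
        }
        mtx_lock(lock);
        if (xpt_bus_register(sim, dev, /*bus*/0) != CAM_SUCCESS) {
                cam_sim_free(sim, /*free_devq*/TRUE);
                mtx_unlock(lock);
                return (ENXIO);
        }
        mtx_unlock(lock);
        return (0);
}
#endif /* CAM_XPT_EXAMPLES */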
 4370 
 4371 int32_t
 4372 xpt_bus_deregister(path_id_t pathid)
 4373 {
 4374         struct cam_path bus_path;
 4375         struct cam_ed *device;
 4376         struct cam_ed_qinfo *qinfo;
 4377         struct cam_devq *devq;
 4378         struct cam_periph *periph;
 4379         struct cam_sim *ccbsim;
 4380         union ccb *work_ccb;
 4381         cam_status status;
 4382 
 4383 
 4384         status = xpt_compile_path(&bus_path, NULL, pathid,
 4385                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4386         if (status != CAM_REQ_CMP)
 4387                 return (status);
 4388 
 4389         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 4390         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 4391 
 4392         /* The SIM may be gone, so use a dummy SIM for any stray operations. */
 4393         devq = bus_path.bus->sim->devq;
 4394         ccbsim = bus_path.bus->sim;
 4395         bus_path.bus->sim = &cam_dead_sim;
 4396 
 4397         /* Execute any pending operations now. */
 4398         while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 4399             CAMQ_HEAD)) != NULL ||
 4400             (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 4401             CAMQ_HEAD)) != NULL) {
 4402                 do {
 4403                         device = qinfo->device;
 4404                         work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 4405                         if (work_ccb != NULL) {
 4406                                 devq->active_dev = device;
 4407                                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 4408                                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 4409                                 (*(ccbsim->sim_action))(ccbsim, work_ccb);
 4410                         }
 4411 
 4412                         periph = (struct cam_periph *)camq_remove(&device->drvq,
 4413                             CAMQ_HEAD);
 4414                         if (periph != NULL)
 4415                                 xpt_schedule(periph, periph->pinfo.priority);
 4416                 } while (work_ccb != NULL || periph != NULL);
 4417         }
 4418 
 4419         /* Make sure all completed CCBs are processed. */
 4420         while (!TAILQ_EMPTY(&ccbsim->sim_doneq)) {
 4421                 camisr_runqueue(&ccbsim->sim_doneq);
 4422 
 4423                 /* Repeat the async's for the benefit of any new devices. */
 4424                 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 4425                 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 4426         }
 4427 
 4428         /* Release the reference count held while registered. */
 4429         xpt_release_bus(bus_path.bus);
 4430         xpt_release_path(&bus_path);
 4431 
 4432         return (CAM_REQ_CMP);
 4433 }
 4434 
 4435 static path_id_t
 4436 xptnextfreepathid(void)
 4437 {
 4438         struct cam_eb *bus;
 4439         path_id_t pathid;
 4440         const char *strval;
 4441 
 4442         pathid = 0;
 4443         mtx_lock(&xsoftc.xpt_topo_lock);
 4444         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4445 retry:
 4446         /* Find an unoccupied pathid */
 4447         while (bus != NULL && bus->path_id <= pathid) {
 4448                 if (bus->path_id == pathid)
 4449                         pathid++;
 4450                 bus = TAILQ_NEXT(bus, links);
 4451         }
 4452         mtx_unlock(&xsoftc.xpt_topo_lock);
 4453 
 4454         /*
 4455          * Ensure that this pathid is not reserved for
 4456          * a bus that may be registered in the future.
 4457          */
 4458         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 4459                 ++pathid;
 4460                 /* Start the search over */
 4461                 mtx_lock(&xsoftc.xpt_topo_lock);
 4462                 goto retry;
 4463         }
 4464         return (pathid);
 4465 }
 4466 
 4467 static path_id_t
 4468 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 4469 {
 4470         path_id_t pathid;
 4471         int i, dunit, val;
 4472         char buf[32];
 4473         const char *dname;
 4474 
 4475         pathid = CAM_XPT_PATH_ID;
 4476         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 4477         i = 0;
 4478         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 4479                 if (strcmp(dname, "scbus")) {
 4480                         /* Avoid a bit of foot shooting. */
 4481                         continue;
 4482                 }
 4483                 if (dunit < 0)          /* unwired?! */
 4484                         continue;
 4485                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 4486                         if (sim_bus == val) {
 4487                                 pathid = dunit;
 4488                                 break;
 4489                         }
 4490                 } else if (sim_bus == 0) {
 4491                         /* Unspecified matches bus 0 */
 4492                         pathid = dunit;
 4493                         break;
 4494                 } else {
 4495                         printf("Ambiguous scbus configuration for %s%d "
 4496                                "bus %d, cannot wire down.  The kernel "
 4497                                "config entry for scbus%d should "
 4498                                "specify a controller bus.\n"
 4499                                "Scbus will be assigned dynamically.\n",
 4500                                sim_name, sim_unit, sim_bus, dunit);
 4501                         break;
 4502                 }
 4503         }
 4504 
 4505         if (pathid == CAM_XPT_PATH_ID)
 4506                 pathid = xptnextfreepathid();
 4507         return (pathid);
 4508 }
 4509 
 4510 void
 4511 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 4512 {
 4513         struct cam_eb *bus;
 4514         struct cam_et *target, *next_target;
 4515         struct cam_ed *device, *next_device;
 4516 
 4517         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4518 
 4519         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
 4520 
 4521         /*
 4522          * Most async events come from a CAM interrupt context.  In
 4523          * a few cases, the error recovery code at the peripheral layer,
 4524          * which may run from our SWI or a process context, may signal
 4525          * deferred events with a call to xpt_async.
 4526          */
 4527 
 4528         bus = path->bus;
 4529 
 4530         if (async_code == AC_BUS_RESET) {
 4531                 /* Update our notion of when the last reset occurred */
 4532                 microtime(&bus->last_reset);
 4533         }
 4534 
 4535         for (target = TAILQ_FIRST(&bus->et_entries);
 4536              target != NULL;
 4537              target = next_target) {
 4538 
 4539                 next_target = TAILQ_NEXT(target, links);
 4540 
 4541                 if (path->target != target
 4542                  && path->target->target_id != CAM_TARGET_WILDCARD
 4543                  && target->target_id != CAM_TARGET_WILDCARD)
 4544                         continue;
 4545 
 4546                 if (async_code == AC_SENT_BDR) {
 4547                         /* Update our notion of when the last reset occurred */
 4548                         microtime(&path->target->last_reset);
 4549                 }
 4550 
 4551                 for (device = TAILQ_FIRST(&target->ed_entries);
 4552                      device != NULL;
 4553                      device = next_device) {
 4554 
 4555                         next_device = TAILQ_NEXT(device, links);
 4556 
 4557                         if (path->device != device
 4558                          && path->device->lun_id != CAM_LUN_WILDCARD
 4559                          && device->lun_id != CAM_LUN_WILDCARD)
 4560                                 continue;
 4561 
 4562                         xpt_dev_async(async_code, bus, target,
 4563                                       device, async_arg);
 4564 
 4565                         xpt_async_bcast(&device->asyncs, async_code,
 4566                                         path, async_arg);
 4567                 }
 4568         }
 4569 
 4570         /*
 4571          * If this wasn't a fully wildcarded async, tell all
 4572          * clients that want all async events.
 4573          */
 4574         if (bus != xpt_periph->path->bus)
 4575                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 4576                                 path, async_arg);
 4577 }
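
#ifdef CAM_XPT_EXAMPLES
/*
 * Editor's sketch, not part of the original file: how a SIM might report
 * a bus reset it observed.  The fully wildcarded path makes the
 * notification fan out to every target and LUN on the bus, as
 * implemented in xpt_async() above.
 */
static void
example_report_bus_reset(struct cam_sim *sim)
{
        struct cam_path *path;

        if (xpt_create_path(&path, /*periph*/NULL, cam_sim_path(sim),
                            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
            != CAM_REQ_CMP)
                return;
        xpt_async(AC_BUS_RESET, path, NULL);
        xpt_free_path(path);
}
#endif /* CAM_XPT_EXAMPLES */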
 4578 
 4579 static void
 4580 xpt_async_bcast(struct async_list *async_head,
 4581                 u_int32_t async_code,
 4582                 struct cam_path *path, void *async_arg)
 4583 {
 4584         struct async_node *cur_entry;
 4585 
 4586         cur_entry = SLIST_FIRST(async_head);
 4587         while (cur_entry != NULL) {
 4588                 struct async_node *next_entry;
 4589                 /*
 4590                  * Grab the next list entry before we call the current
 4591                  * entry's callback.  This is because the callback function
 4592                  * can delete its async callback entry.
 4593                  */
 4594                 next_entry = SLIST_NEXT(cur_entry, links);
 4595                 if ((cur_entry->event_enable & async_code) != 0)
 4596                         cur_entry->callback(cur_entry->callback_arg,
 4597                                             async_code, path,
 4598                                             async_arg);
 4599                 cur_entry = next_entry;
 4600         }
 4601 }
 4602 
 4603 /*
 4604  * Handle any per-device event notifications that require action by the XPT.
 4605  */
 4606 static void
 4607 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
 4608               struct cam_ed *device, void *async_arg)
 4609 {
 4610         cam_status status;
 4611         struct cam_path newpath;
 4612 
 4613         /*
 4614          * We only need to handle events for real devices.
 4615          */
 4616         if (target->target_id == CAM_TARGET_WILDCARD
 4617          || device->lun_id == CAM_LUN_WILDCARD)
 4618                 return;
 4619 
 4620         /*
 4621          * We need our own path with wildcards expanded to
 4622          * handle certain types of events.
 4623          */
 4624         if ((async_code == AC_SENT_BDR)
 4625          || (async_code == AC_BUS_RESET)
 4626          || (async_code == AC_INQ_CHANGED))
 4627                 status = xpt_compile_path(&newpath, NULL,
 4628                                           bus->path_id,
 4629                                           target->target_id,
 4630                                           device->lun_id);
 4631         else
 4632                 status = CAM_REQ_CMP_ERR;
 4633 
 4634         if (status == CAM_REQ_CMP) {
 4635 
 4636                 /*
 4637                  * Allow transfer negotiation to occur in a
 4638                  * tag free environment.
 4639                  */
 4640                 if (async_code == AC_SENT_BDR
 4641                  || async_code == AC_BUS_RESET)
 4642                         xpt_toggle_tags(&newpath);
 4643 
 4644                 if (async_code == AC_INQ_CHANGED) {
 4645                         /*
 4646                          * We've sent a start unit command, or
 4647                          * something similar to a device that
 4648                          * may have caused its inquiry data to
 4649                          * change. So we re-scan the device to
 4650                          * refresh the inquiry data for it.
 4651                          */
 4652                         xpt_scan_lun(newpath.periph, &newpath,
 4653                                      CAM_EXPECT_INQ_CHANGE, NULL);
 4654                 }
 4655                 xpt_release_path(&newpath);
 4656         } else if (async_code == AC_LOST_DEVICE) {
 4657                 device->flags |= CAM_DEV_UNCONFIGURED;
 4658         } else if (async_code == AC_TRANSFER_NEG) {
 4659                 struct ccb_trans_settings *settings;
 4660 
 4661                 settings = (struct ccb_trans_settings *)async_arg;
 4662                 xpt_set_transfer_settings(settings, device,
 4663                                           /*async_update*/TRUE);
 4664         }
 4665 }
 4666 
 4667 u_int32_t
 4668 xpt_freeze_devq(struct cam_path *path, u_int count)
 4669 {
 4670         struct ccb_hdr *ccbh;
 4671 
 4672         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4673 
 4674         path->device->qfrozen_cnt += count;
 4675 
 4676         /*
 4677          * Mark the last CCB in the queue as needing
 4678          * to be requeued if the driver hasn't
 4679          * changed its state yet.  This fixes a race
 4680          * where a ccb is just about to be queued to
 4681          * a controller driver when its interrupt routine
 4682          * freezes the queue.  To completely close the
 4683          * hole, controller drivers must check to see
 4684          * if a ccb's status is still CAM_REQ_INPROG
 4685          * just before they queue
 4686          * the CCB.  See ahc_action/ahc_freeze_devq for
 4687          * an example.
 4688          */
 4689         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
 4690         if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4691                 ccbh->status = CAM_REQUEUE_REQ;
 4692         return (path->device->qfrozen_cnt);
 4693 }
 4694 
 4695 u_int32_t
 4696 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4697 {
 4698         mtx_assert(sim->mtx, MA_OWNED);
 4699 
 4700         sim->devq->send_queue.qfrozen_cnt += count;
 4701         if (sim->devq->active_dev != NULL) {
 4702                 struct ccb_hdr *ccbh;
 4703 
 4704                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
 4705                                   ccb_hdr_tailq);
 4706                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4707                         ccbh->status = CAM_REQUEUE_REQ;
 4708         }
 4709         return (sim->devq->send_queue.qfrozen_cnt);
 4710 }
 4711 
 4712 static void
 4713 xpt_release_devq_timeout(void *arg)
 4714 {
 4715         struct cam_ed *device;
 4716 
 4717         device = (struct cam_ed *)arg;
 4718 
 4719         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
 4720 }
 4721 
 4722 void
 4723 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4724 {
 4725         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4726 
 4727         xpt_release_devq_device(path->device, count, run_queue);
 4728 }
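
#ifdef CAM_XPT_EXAMPLES
/*
 * Editor's sketch, not part of the original file: the freeze/release
 * pairing used around a recovery action.  The device queue stays frozen
 * while the recovery command runs; releasing with run_queue set lets any
 * pending transactions restart immediately.
 */
static void
example_recover_device(struct cam_path *path)
{

        xpt_freeze_devq(path, /*count*/1);
        /* ... issue and wait for a recovery command here ... */
        xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
}
#endif /* CAM_XPT_EXAMPLES */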
 4729 
 4730 static void
 4731 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
 4732 {
 4733         int     rundevq;
 4734 
 4735         rundevq = 0;
 4736         if (dev->qfrozen_cnt > 0) {
 4737 
 4738                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
 4739                 dev->qfrozen_cnt -= count;
 4740                 if (dev->qfrozen_cnt == 0) {
 4741 
 4742                         /*
 4743                          * No longer need to wait for a successful
 4744                          * command completion.
 4745                          */
 4746                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4747 
 4748                         /*
 4749                          * Remove any timeouts that might be scheduled
 4750                          * to release this queue.
 4751                          */
 4752                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4753                                 callout_stop(&dev->callout);
 4754                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4755                         }
 4756 
 4757                         /*
 4758                          * Now that we are unfrozen schedule the
 4759                          * device so any pending transactions are
 4760                          * run.
 4761                          */
 4762                         if ((dev->ccbq.queue.entries > 0)
 4763                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4764                          && (run_queue != 0)) {
 4765                                 rundevq = 1;
 4766                         }
 4767                 }
 4768         }
 4769         if (rundevq != 0)
 4770                 xpt_run_dev_sendq(dev->target->bus);
 4771 }
 4772 
 4773 void
 4774 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4775 {
 4776         struct  camq *sendq;
 4777 
 4778         mtx_assert(sim->mtx, MA_OWNED);
 4779 
 4780         sendq = &(sim->devq->send_queue);
 4781         if (sendq->qfrozen_cnt > 0) {
 4782 
 4783                 sendq->qfrozen_cnt--;
 4784                 if (sendq->qfrozen_cnt == 0) {
 4785                         struct cam_eb *bus;
 4786 
 4787                         /*
 4788                          * If there is a timeout scheduled to release this
 4789                          * sim queue, remove it.  The queue frozen count is
 4790                          * already at 0.
 4791                          */
 4792                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4793                                 callout_stop(&sim->callout);
 4794                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4795                         }
 4796                         bus = xpt_find_bus(sim->path_id);
 4797 
 4798                         if (run_queue) {
 4799                                 /*
 4800                                  * Now that we are unfrozen run the send queue.
 4801                                  */
 4802                                 xpt_run_dev_sendq(bus);
 4803                         }
 4804                         xpt_release_bus(bus);
 4805                 }
 4806         }
 4807 }
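
#ifdef CAM_XPT_EXAMPLES
/*
 * Editor's sketch, not part of the original file: one common way a SIM
 * action routine backs off when the controller is out of resources.  The
 * simq is frozen and the CCB flagged for requeue; a later completion
 * path would call xpt_release_simq(sim, TRUE) once resources free up.
 */
static void
example_sim_resource_shortage(struct cam_sim *sim, union ccb *ccb)
{

        xpt_freeze_simq(sim, /*count*/1);
        ccb->ccb_h.status = CAM_REQUEUE_REQ;
        xpt_done(ccb);
}
#endif /* CAM_XPT_EXAMPLES */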
 4808 
 4809 /*
 4810  * XXX Appears to be unused.
 4811  */
 4812 static void
 4813 xpt_release_simq_timeout(void *arg)
 4814 {
 4815         struct cam_sim *sim;
 4816 
 4817         sim = (struct cam_sim *)arg;
 4818         xpt_release_simq(sim, /* run_queue */ TRUE);
 4819 }
 4820 
 4821 void
 4822 xpt_done(union ccb *done_ccb)
 4823 {
 4824         struct cam_sim *sim;
 4825 
 4826         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4827         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4828                 /*
 4829                  * Queue up any of the "non-immediate" types of ccbs
 4830                  * for handling by our SWI handler.
 4831                  */
 4832                 sim = done_ccb->ccb_h.path->bus->sim;
 4833                 switch (done_ccb->ccb_h.path->periph->type) {
 4834                 case CAM_PERIPH_BIO:
 4835                         TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
 4836                                           sim_links.tqe);
 4837                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4838                         if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
 4839                                 mtx_lock(&cam_simq_lock);
 4840                                 TAILQ_INSERT_TAIL(&cam_simq, sim,
 4841                                                   links);
 4842                                 sim->flags |= CAM_SIM_ON_DONEQ;
 4843                                 mtx_unlock(&cam_simq_lock);
 4844                         }
 4845                         if ((done_ccb->ccb_h.path->periph->flags &
 4846                             CAM_PERIPH_POLLED) == 0)
 4847                                 swi_sched(cambio_ih, 0);
 4848                         break;
 4849                 default:
 4850                         panic("unknown periph type %d",
 4851                             done_ccb->ccb_h.path->periph->type);
 4852                 }
 4853         }
 4854 }
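
#ifdef CAM_XPT_EXAMPLES
/*
 * Editor's sketch, not part of the original file: a SIM completing an
 * I/O from its interrupt handler.  Status is filled in first; xpt_done()
 * then defers the peripheral callback to the CAM SWI as shown above.
 */
static void
example_complete_io(union ccb *ccb, int hw_error)
{

        ccb->ccb_h.status = hw_error ? CAM_REQ_CMP_ERR : CAM_REQ_CMP;
        xpt_done(ccb);
}
#endif /* CAM_XPT_EXAMPLES */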
 4855 
 4856 union ccb *
 4857 xpt_alloc_ccb(void)
 4858 {
 4859         union ccb *new_ccb;
 4860 
 4861         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK);
 4862         return (new_ccb);
 4863 }
 4864 
 4865 union ccb *
 4866 xpt_alloc_ccb_nowait(void)
 4867 {
 4868         union ccb *new_ccb;
 4869 
 4870         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT);
 4871         return (new_ccb);
 4872 }
 4873 
 4874 void
 4875 xpt_free_ccb(union ccb *free_ccb)
 4876 {
 4877         free(free_ccb, M_CAMXPT);
 4878 }
 4879 
 4880 
 4881 
 4882 /* Private XPT functions */
 4883 
 4884 /*
 4885  * Get a CAM control block for the caller. Charge the structure to the device
 4886  * referenced by the path.  If this device has no 'credits', then the
 4887  * device already has the maximum number of outstanding operations under way
 4888  * and we return NULL. If we don't have sufficient resources to allocate more
 4889  * ccbs, we also return NULL.
 4890  */
 4891 static union ccb *
 4892 xpt_get_ccb(struct cam_ed *device)
 4893 {
 4894         union ccb *new_ccb;
 4895         struct cam_sim *sim;
 4896 
 4897         sim = device->sim;
 4898         if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
 4899                 new_ccb = xpt_alloc_ccb_nowait();
 4900                 if (new_ccb == NULL) {
 4901                         return (NULL);
 4902                 }
 4903                 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
 4904                         callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 4905                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
 4906                                   xpt_links.sle);
 4907                 sim->ccb_count++;
 4908         }
 4909         cam_ccbq_take_opening(&device->ccbq);
 4910         SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
 4911         return (new_ccb);
 4912 }
 4913 
 4914 static void
 4915 xpt_release_bus(struct cam_eb *bus)
 4916 {
 4917 
 4918         if ((--bus->refcount == 0)
 4919          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
 4920                 mtx_lock(&xsoftc.xpt_topo_lock);
 4921                 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
 4922                 xsoftc.bus_generation++;
 4923                 mtx_unlock(&xsoftc.xpt_topo_lock);
 4924                 free(bus, M_CAMXPT);
 4925         }
 4926 }
 4927 
 4928 static struct cam_et *
 4929 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4930 {
 4931         struct cam_et *target;
 4932 
 4933         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
 4934         if (target != NULL) {
 4935                 struct cam_et *cur_target;
 4936 
 4937                 TAILQ_INIT(&target->ed_entries);
 4938                 target->bus = bus;
 4939                 target->target_id = target_id;
 4940                 target->refcount = 1;
 4941                 target->generation = 0;
 4942                 timevalclear(&target->last_reset);
 4943                 /*
 4944                  * Hold a reference to our parent bus so it
 4945                  * will not go away before we do.
 4946                  */
 4947                 bus->refcount++;
 4948 
 4949                 /* Insertion sort into our bus's target list */
 4950                 cur_target = TAILQ_FIRST(&bus->et_entries);
 4951                 while (cur_target != NULL && cur_target->target_id < target_id)
 4952                         cur_target = TAILQ_NEXT(cur_target, links);
 4953 
 4954                 if (cur_target != NULL) {
 4955                         TAILQ_INSERT_BEFORE(cur_target, target, links);
 4956                 } else {
 4957                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4958                 }
 4959                 bus->generation++;
 4960         }
 4961         return (target);
 4962 }
 4963 
 4964 static void
 4965 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
 4966 {
 4967 
 4968         if ((--target->refcount == 0)
 4969          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
 4970                 TAILQ_REMOVE(&bus->et_entries, target, links);
 4971                 bus->generation++;
 4972                 free(target, M_CAMXPT);
 4973                 xpt_release_bus(bus);
 4974         }
 4975 }
 4976 
 4977 static struct cam_ed *
 4978 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4979 {
 4980         struct     cam_path path;
 4981         struct     cam_ed *device;
 4982         struct     cam_devq *devq;
 4983         cam_status status;
 4984 
 4985         if (SIM_DEAD(bus->sim))
 4986                 return (NULL);
 4987 
 4988         /* Make space for us in the device queue on our bus */
 4989         devq = bus->sim->devq;
 4990         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 4991 
 4992         if (status != CAM_REQ_CMP) {
 4993                 device = NULL;
 4994         } else {
 4995                 device = (struct cam_ed *)malloc(sizeof(*device),
 4996                                                  M_CAMXPT, M_NOWAIT);
 4997         }
 4998 
 4999         if (device != NULL) {
 5000                 struct cam_ed *cur_device;
 5001 
 5002                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 5003                 device->alloc_ccb_entry.device = device;
 5004                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
 5005                 device->send_ccb_entry.device = device;
 5006                 device->target = target;
 5007                 device->lun_id = lun_id;
 5008                 device->sim = bus->sim;
 5009                 /* Initialize our queues */
 5010                 if (camq_init(&device->drvq, 0) != 0) {
 5011                         free(device, M_CAMXPT);
 5012                         return (NULL);
 5013                 }
 5014                 if (cam_ccbq_init(&device->ccbq,
 5015                                   bus->sim->max_dev_openings) != 0) {
 5016                         camq_fini(&device->drvq);
 5017                         free(device, M_CAMXPT);
 5018                         return (NULL);
 5019                 }
 5020                 SLIST_INIT(&device->asyncs);
 5021                 SLIST_INIT(&device->periphs);
 5022                 device->generation = 0;
 5023                 device->owner = NULL;
 5024                 /*
 5025                  * Take the default quirk entry until we have inquiry
 5026                  * data and can determine a better quirk to use.
 5027                  */
 5028                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
 5029                 bzero(&device->inq_data, sizeof(device->inq_data));
 5030                 device->inq_flags = 0;
 5031                 device->queue_flags = 0;
 5032                 device->serial_num = NULL;
 5033                 device->serial_num_len = 0;
 5034                 device->qfrozen_cnt = 0;
 5035                 device->flags = CAM_DEV_UNCONFIGURED;
 5036                 device->tag_delay_count = 0;
 5037                 device->tag_saved_openings = 0;
 5038                 device->refcount = 1;
 5039                 if (bus->sim->flags & CAM_SIM_MPSAFE)
 5040                         callout_init_mtx(&device->callout, bus->sim->mtx, 0);
 5041                 else
 5042                         callout_init_mtx(&device->callout, &Giant, 0);
 5043 
 5044                 /*
 5045                  * Hold a reference to our parent target so it
 5046                  * will not go away before we do.
 5047                  */
 5048                 target->refcount++;
 5049 
 5050                 /*
 5051                  * XXX should be limited by number of CCBs this bus can
 5052                  * do.
 5053                  */
 5054                 bus->sim->max_ccbs += device->ccbq.devq_openings;
 5055                 /* Insertion sort into our target's device list */
 5056                 cur_device = TAILQ_FIRST(&target->ed_entries);
 5057                 while (cur_device != NULL && cur_device->lun_id < lun_id)
 5058                         cur_device = TAILQ_NEXT(cur_device, links);
 5059                 if (cur_device != NULL) {
 5060                         TAILQ_INSERT_BEFORE(cur_device, device, links);
 5061                 } else {
 5062                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 5063                 }
 5064                 target->generation++;
 5065                 if (lun_id != CAM_LUN_WILDCARD) {
 5066                         xpt_compile_path(&path,
 5067                                          NULL,
 5068                                          bus->path_id,
 5069                                          target->target_id,
 5070                                          lun_id);
 5071                         xpt_devise_transport(&path);
 5072                         xpt_release_path(&path);
 5073                 }
 5074         }
 5075         return (device);
 5076 }
 5077 
 5078 static void
 5079 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
 5080                    struct cam_ed *device)
 5081 {
 5082 
 5083         if ((--device->refcount == 0)
 5084          && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
 5085                 struct cam_devq *devq;
 5086 
 5087                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 5088                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 5089                         panic("Removing device while still queued for ccbs");
 5090 
 5091                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 5092                                 callout_stop(&device->callout);
 5093 
 5094                 TAILQ_REMOVE(&target->ed_entries, device,links);
 5095                 target->generation++;
 5096                 bus->sim->max_ccbs -= device->ccbq.devq_openings;
 5097                 if (!SIM_DEAD(bus->sim)) {
 5098                         /* Release our slot in the devq */
 5099                         devq = bus->sim->devq;
 5100                         cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 5101                 }
 5102                 camq_fini(&device->drvq);
 5103                 camq_fini(&device->ccbq.queue);
 5104                 free(device, M_CAMXPT);
 5105                 xpt_release_target(bus, target);
 5106         }
 5107 }
 5108 
 5109 static u_int32_t
 5110 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 5111 {
 5112         int     diff;
 5113         int     result;
 5114         struct  cam_ed *dev;
 5115 
 5116         dev = path->device;
 5117 
 5118         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 5119         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 5120         if (result == CAM_REQ_CMP && (diff < 0)) {
 5121                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 5122         }
 5123         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 5124          || (dev->inq_flags & SID_CmdQue) != 0)
 5125                 dev->tag_saved_openings = newopenings;
 5126         /* Adjust the global limit */
 5127         dev->sim->max_ccbs += diff;
 5128         return (result);
 5129 }
 5130 
 5131 static struct cam_eb *
 5132 xpt_find_bus(path_id_t path_id)
 5133 {
 5134         struct cam_eb *bus;
 5135 
 5136         mtx_lock(&xsoftc.xpt_topo_lock);
 5137         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 5138              bus != NULL;
 5139              bus = TAILQ_NEXT(bus, links)) {
 5140                 if (bus->path_id == path_id) {
 5141                         bus->refcount++;
 5142                         break;
 5143                 }
 5144         }
 5145         mtx_unlock(&xsoftc.xpt_topo_lock);
 5146         return (bus);
 5147 }
 5148 
 5149 static struct cam_et *
 5150 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 5151 {
 5152         struct cam_et *target;
 5153 
 5154         for (target = TAILQ_FIRST(&bus->et_entries);
 5155              target != NULL;
 5156              target = TAILQ_NEXT(target, links)) {
 5157                 if (target->target_id == target_id) {
 5158                         target->refcount++;
 5159                         break;
 5160                 }
 5161         }
 5162         return (target);
 5163 }
 5164 
 5165 static struct cam_ed *
 5166 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 5167 {
 5168         struct cam_ed *device;
 5169 
 5170         for (device = TAILQ_FIRST(&target->ed_entries);
 5171              device != NULL;
 5172              device = TAILQ_NEXT(device, links)) {
 5173                 if (device->lun_id == lun_id) {
 5174                         device->refcount++;
 5175                         break;
 5176                 }
 5177         }
 5178         return (device);
 5179 }
 5180 
 5181 typedef struct {
 5182         union   ccb *request_ccb;
 5183         struct  ccb_pathinq *cpi;
 5184         int     counter;
 5185 } xpt_scan_bus_info;
 5186 
 5187 /*
 5188  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
 5189  * As the scan progresses, xpt_scan_bus is used as the
 5190  * completion callback function.
 5191  */
 5192 static void
 5193 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
 5194 {
 5195         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5196                   ("xpt_scan_bus\n"));
 5197         switch (request_ccb->ccb_h.func_code) {
 5198         case XPT_SCAN_BUS:
 5199         {
 5200                 xpt_scan_bus_info *scan_info;
 5201                 union   ccb *work_ccb;
 5202                 struct  cam_path *path;
 5203                 u_int   i;
 5204                 u_int   max_target;
 5205                 u_int   initiator_id;
 5206 
 5207                 /* Find out the characteristics of the bus */
 5208                 work_ccb = xpt_alloc_ccb_nowait();
 5209                 if (work_ccb == NULL) {
 5210                         request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 5211                         xpt_done(request_ccb);
 5212                         return;
 5213                 }
 5214                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
 5215                               request_ccb->ccb_h.pinfo.priority);
 5216                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 5217                 xpt_action(work_ccb);
 5218                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
 5219                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
 5220                         xpt_free_ccb(work_ccb);
 5221                         xpt_done(request_ccb);
 5222                         return;
 5223                 }
 5224 
 5225                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5226                         /*
 5227                          * Can't scan the bus on an adapter that
 5228                          * cannot perform the initiator role.
 5229                          */
 5230                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5231                         xpt_free_ccb(work_ccb);
 5232                         xpt_done(request_ccb);
 5233                         return;
 5234                 }
 5235 
 5236                 /* Save some state for use while we probe for devices */
 5237                 scan_info = (xpt_scan_bus_info *)
 5238                     malloc(sizeof(xpt_scan_bus_info), M_CAMXPT, M_NOWAIT);
                      if (scan_info == NULL) {
                              request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
                              xpt_free_ccb(work_ccb);
                              xpt_done(request_ccb);
                              return;
                      }
 5239                 scan_info->request_ccb = request_ccb;
 5240                 scan_info->cpi = &work_ccb->cpi;
 5241 
 5242                 /* Cache on our stack so we can work asynchronously */
 5243                 max_target = scan_info->cpi->max_target;
 5244                 initiator_id = scan_info->cpi->initiator_id;
 5245 
 5246 
 5247                 /*
 5248                  * We can scan all targets in parallel, or do it sequentially.
 5249                  */
 5250                 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
 5251                         max_target = 0;
 5252                         scan_info->counter = 0;
 5253                 } else {
 5254                         scan_info->counter = scan_info->cpi->max_target + 1;
 5255                         if (scan_info->cpi->initiator_id < scan_info->counter) {
 5256                                 scan_info->counter--;
 5257                         }
 5258                 }
 5259 
 5260                 for (i = 0; i <= max_target; i++) {
 5261                         cam_status status;
 5262                         if (i == initiator_id)
 5263                                 continue;
 5264 
 5265                         status = xpt_create_path(&path, xpt_periph,
 5266                                                  request_ccb->ccb_h.path_id,
 5267                                                  i, 0);
 5268                         if (status != CAM_REQ_CMP) {
 5269                                 printf("xpt_scan_bus: xpt_create_path failed"
 5270                                        " with status %#x, bus scan halted\n",
 5271                                        status);
 5272                                 free(scan_info, M_CAMXPT);
 5273                                 request_ccb->ccb_h.status = status;
 5274                                 xpt_free_ccb(work_ccb);
 5275                                 xpt_done(request_ccb);
 5276                                 break;
 5277                         }
 5278                         work_ccb = xpt_alloc_ccb_nowait();
 5279                         if (work_ccb == NULL) {
 5280                                 free(scan_info, M_CAMXPT);
 5281                                 xpt_free_path(path);
 5282                                 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 5283                                 xpt_done(request_ccb);
 5284                                 break;
 5285                         }
 5286                         xpt_setup_ccb(&work_ccb->ccb_h, path,
 5287                                       request_ccb->ccb_h.pinfo.priority);
 5288                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5289                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5290                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5291                         work_ccb->crcn.flags = request_ccb->crcn.flags;
 5292                         xpt_action(work_ccb);
 5293                 }
 5294                 break;
 5295         }
 5296         case XPT_SCAN_LUN:
 5297         {
 5298                 cam_status status;
 5299                 struct cam_path *path;
 5300                 xpt_scan_bus_info *scan_info;
 5301                 path_id_t path_id;
 5302                 target_id_t target_id;
 5303                 lun_id_t lun_id;
 5304 
 5305                 /* Reuse the same CCB to query if a device was really found */
 5306                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
 5307                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
 5308                               request_ccb->ccb_h.pinfo.priority);
 5309                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 5310 
 5311                 path_id = request_ccb->ccb_h.path_id;
 5312                 target_id = request_ccb->ccb_h.target_id;
 5313                 lun_id = request_ccb->ccb_h.target_lun;
 5314                 xpt_action(request_ccb);
 5315 
 5316                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
 5317                         struct cam_ed *device;
 5318                         struct cam_et *target;
 5319                         int phl;
 5320 
 5321                         /*
 5322                          * If we already probed lun 0 successfully, or
 5323                          * we have additional configured luns on this
 5324                          * target that might have "gone away", go on to
 5325                          * the next lun.
 5326                          */
 5327                         target = request_ccb->ccb_h.path->target;
 5328                         /*
 5329                          * We may touch devices that we don't
 5330                          * hold references to, so ensure they
 5331                          * don't disappear out from under us.
 5332                          * The target above is referenced by the
 5333                          * path in the request ccb.
 5334                          */
 5335                         phl = 0;
 5336                         device = TAILQ_FIRST(&target->ed_entries);
 5337                         if (device != NULL) {
 5338                                 phl = CAN_SRCH_HI_SPARSE(device);
 5339                                 if (device->lun_id == 0)
 5340                                         device = TAILQ_NEXT(device, links);
 5341                         }
 5342                         if ((lun_id != 0) || (device != NULL)) {
 5343                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
 5344                                         lun_id++;
 5345                         }
 5346                 } else {
 5347                         struct cam_ed *device;
 5348 
 5349                         device = request_ccb->ccb_h.path->device;
 5350 
 5351                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
 5352                                 /* Try the next lun */
 5353                                 if (lun_id < (CAM_SCSI2_MAXLUN-1)
 5354                                   || CAN_SRCH_HI_DENSE(device))
 5355                                         lun_id++;
 5356                         }
 5357                 }
 5358 
 5359                 /*
 5360                  * Free the current request path; we're done with it.
 5361                  */
 5362                 xpt_free_path(request_ccb->ccb_h.path);
 5363 
 5364                 /*
 5365                  * Check to see if there are any further luns to scan.
 5366                  */
 5367                 if (lun_id == request_ccb->ccb_h.target_lun
 5368                  || lun_id > scan_info->cpi->max_lun) {
 5369                         int done;
 5370 
 5371  hop_again:
 5372                         done = 0;
 5373                         if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
 5374                                 scan_info->counter++;
 5375                                 if (scan_info->counter ==
 5376                                     scan_info->cpi->initiator_id) {
 5377                                         scan_info->counter++;
 5378                                 }
 5379                                 if (scan_info->counter >=
 5380                                     scan_info->cpi->max_target+1) {
 5381                                         done = 1;
 5382                                 }
 5383                         } else {
 5384                                 scan_info->counter--;
 5385                                 if (scan_info->counter == 0) {
 5386                                         done = 1;
 5387                                 }
 5388                         }
 5389                         if (done) {
 5390                                 xpt_free_ccb(request_ccb);
 5391                                 xpt_free_ccb((union ccb *)scan_info->cpi);
 5392                                 request_ccb = scan_info->request_ccb;
 5393                                 free(scan_info, M_CAMXPT);
 5394                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
 5395                                 xpt_done(request_ccb);
 5396                                 break;
 5397                         }
 5398 
 5399                         if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
 5400                                 break;
 5401                         }
 5402                         status = xpt_create_path(&path, xpt_periph,
 5403                             scan_info->request_ccb->ccb_h.path_id,
 5404                             scan_info->counter, 0);
 5405                         if (status != CAM_REQ_CMP) {
 5406                                 printf("xpt_scan_bus: xpt_create_path failed"
 5407                                     " with status %#x, bus scan halted\n",
 5408                                     status);
 5409                                 xpt_free_ccb(request_ccb);
 5410                                 xpt_free_ccb((union ccb *)scan_info->cpi);
 5411                                 request_ccb = scan_info->request_ccb;
 5412                                 free(scan_info, M_CAMXPT);
 5413                                 request_ccb->ccb_h.status = status;
 5414                                 xpt_done(request_ccb);
 5415                                 break;
 5416                         }
 5417                         xpt_setup_ccb(&request_ccb->ccb_h, path,
 5418                             request_ccb->ccb_h.pinfo.priority);
 5419                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5420                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5421                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5422                         request_ccb->crcn.flags =
 5423                             scan_info->request_ccb->crcn.flags;
 5424                 } else {
 5425                         status = xpt_create_path(&path, xpt_periph,
 5426                                                  path_id, target_id, lun_id);
 5427                         if (status != CAM_REQ_CMP) {
 5428                                 printf("xpt_scan_bus: xpt_create_path failed "
 5429                                        "with status %#x, halting LUN scan\n",
 5430                                        status);
 5431                                 goto hop_again;
 5432                         }
 5433                         xpt_setup_ccb(&request_ccb->ccb_h, path,
 5434                                       request_ccb->ccb_h.pinfo.priority);
 5435                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5436                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5437                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5438                         request_ccb->crcn.flags =
 5439                                 scan_info->request_ccb->crcn.flags;
 5440                 }
 5441                 xpt_action(request_ccb);
 5442                 break;
 5443         }
 5444         default:
 5445                 break;
 5446         }
 5447 }
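
/*
 * A bus scan is kicked off by handing xpt_action() an XPT_SCAN_BUS CCB,
 * as described above.  A minimal sketch of building such a request
 * (hypothetical caller; it assumes a wildcard path for the bus has
 * already been created and that example_scan_done() is the caller's
 * own completion routine):
 */
#if 0
static void
example_start_bus_scan(struct cam_path *path)
{
        union ccb *ccb;

        ccb = xpt_alloc_ccb();
        xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/5);
        ccb->ccb_h.func_code = XPT_SCAN_BUS;
        ccb->ccb_h.cbfcnp = example_scan_done;  /* final completion hook */
        ccb->crcn.flags = CAM_FLAG_NONE;
        xpt_action(ccb);
}
#endif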
 5448 
 5449 typedef enum {
 5450         PROBE_TUR,
 5451         PROBE_INQUIRY,  /* this counts as DV0 for Basic Domain Validation */
 5452         PROBE_FULL_INQUIRY,
 5453         PROBE_MODE_SENSE,
 5454         PROBE_SERIAL_NUM_0,
 5455         PROBE_SERIAL_NUM_1,
 5456         PROBE_TUR_FOR_NEGOTIATION,
 5457         PROBE_INQUIRY_BASIC_DV1,
 5458         PROBE_INQUIRY_BASIC_DV2,
 5459         PROBE_DV_EXIT
 5460 } probe_action;
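
/*
 * In the common case the probe state machine below advances roughly as
 * PROBE_TUR -> PROBE_INQUIRY (-> PROBE_FULL_INQUIRY when the device has
 * more inquiry data to give) -> PROBE_MODE_SENSE (tagged-queueing
 * devices only) -> PROBE_SERIAL_NUM_0 -> PROBE_SERIAL_NUM_1 ->
 * PROBE_TUR_FOR_NEGOTIATION.  The PROBE_INQUIRY_BASIC_DV1/DV2 and
 * PROBE_DV_EXIT states are entered only when Basic Domain Validation
 * runs.
 */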
 5461 
 5462 typedef enum {
 5463         PROBE_INQUIRY_CKSUM     = 0x01,
 5464         PROBE_SERIAL_CKSUM      = 0x02,
 5465         PROBE_NO_ANNOUNCE       = 0x04
 5466 } probe_flags;
 5467 
 5468 typedef struct {
 5469         TAILQ_HEAD(, ccb_hdr) request_ccbs;
 5470         probe_action    action;
 5471         union ccb       saved_ccb;
 5472         probe_flags     flags;
 5473         MD5_CTX         context;
 5474         u_int8_t        digest[16];
 5475 } probe_softc;
 5476 
 5477 static void
 5478 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
 5479              cam_flags flags, union ccb *request_ccb)
 5480 {
 5481         struct ccb_pathinq cpi;
 5482         cam_status status;
 5483         struct cam_path *new_path;
 5484         struct cam_periph *old_periph;
 5485 
 5486         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5487                   ("xpt_scan_lun\n"));
 5488 
 5489         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 5490         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5491         xpt_action((union ccb *)&cpi);
 5492 
 5493         if (cpi.ccb_h.status != CAM_REQ_CMP) {
 5494                 if (request_ccb != NULL) {
 5495                         request_ccb->ccb_h.status = cpi.ccb_h.status;
 5496                         xpt_done(request_ccb);
 5497                 }
 5498                 return;
 5499         }
 5500 
 5501         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5502                 /*
 5503                  * Can't scan the bus on an adapter that
 5504                  * cannot perform the initiator role.
 5505                  */
 5506                 if (request_ccb != NULL) {
 5507                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5508                         xpt_done(request_ccb);
 5509                 }
 5510                 return;
 5511         }
 5512 
 5513         if (request_ccb == NULL) {
 5514                 request_ccb = malloc(sizeof(union ccb), M_CAMXPT, M_NOWAIT);
 5515                 if (request_ccb == NULL) {
 5516                         xpt_print(path, "xpt_scan_lun: can't allocate CCB, "
 5517                             "can't continue\n");
 5518                         return;
 5519                 }
 5520                 new_path = malloc(sizeof(*new_path), M_CAMXPT, M_NOWAIT);
 5521                 if (new_path == NULL) {
 5522                         xpt_print(path, "xpt_scan_lun: can't allocate path, "
 5523                             "can't continue\n");
 5524                         free(request_ccb, M_CAMXPT);
 5525                         return;
 5526                 }
 5527                 status = xpt_compile_path(new_path, xpt_periph,
 5528                                           path->bus->path_id,
 5529                                           path->target->target_id,
 5530                                           path->device->lun_id);
 5531 
 5532                 if (status != CAM_REQ_CMP) {
 5533                         xpt_print(path, "xpt_scan_lun: can't compile path, "
 5534                             "can't continue\n");
 5535                         free(request_ccb, M_CAMXPT);
 5536                         free(new_path, M_CAMXPT);
 5537                         return;
 5538                 }
 5539                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
 5540                 request_ccb->ccb_h.cbfcnp = xptscandone;
 5541                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5542                 request_ccb->crcn.flags = flags;
 5543         }
 5544 
 5545         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
 5546                 probe_softc *softc;
 5547 
 5548                 softc = (probe_softc *)old_periph->softc;
 5549                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5550                                   periph_links.tqe);
 5551         } else {
 5552                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
 5553                                           probestart, "probe",
 5554                                           CAM_PERIPH_BIO,
 5555                                           request_ccb->ccb_h.path, NULL, 0,
 5556                                           request_ccb);
 5557 
 5558                 if (status != CAM_REQ_CMP) {
 5559                         xpt_print(path, "xpt_scan_lun: cam_periph_alloc "
 5560                             "returned an error, can't continue probe\n");
 5561                         request_ccb->ccb_h.status = status;
 5562                         xpt_done(request_ccb);
 5563                 }
 5564         }
 5565 }
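
/*
 * Callers that merely want a lun rescanned can pass a NULL request_ccb,
 * e.g. (hypothetical call):
 *
 *      xpt_scan_lun(xpt_periph, path, CAM_EXPECT_INQ_CHANGE, NULL);
 *
 * xpt_scan_lun() then allocates its own CCB and path, and xptscandone()
 * below frees both once the probe completes.
 */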
 5566 
 5567 static void
 5568 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
 5569 {
 5570         xpt_release_path(done_ccb->ccb_h.path);
 5571         free(done_ccb->ccb_h.path, M_CAMXPT);
 5572         free(done_ccb, M_CAMXPT);
 5573 }
 5574 
 5575 static cam_status
 5576 proberegister(struct cam_periph *periph, void *arg)
 5577 {
 5578         union ccb *request_ccb; /* CCB representing the probe request */
 5579         cam_status status;
 5580         probe_softc *softc;
 5581 
 5582         request_ccb = (union ccb *)arg;
 5583         if (periph == NULL) {
 5584                 printf("proberegister: periph was NULL!!\n");
 5585                 return(CAM_REQ_CMP_ERR);
 5586         }
 5587 
 5588         if (request_ccb == NULL) {
 5589                 printf("proberegister: no probe CCB, "
 5590                        "can't register device\n");
 5591                 return(CAM_REQ_CMP_ERR);
 5592         }
 5593 
 5594         softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT);
 5595 
 5596         if (softc == NULL) {
 5597                 printf("proberegister: Unable to probe new device. "
 5598                        "Unable to allocate softc\n");
 5599                 return(CAM_REQ_CMP_ERR);
 5600         }
 5601         TAILQ_INIT(&softc->request_ccbs);
 5602         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5603                           periph_links.tqe);
 5604         softc->flags = 0;
 5605         periph->softc = softc;
 5606         status = cam_periph_acquire(periph);
 5607         if (status != CAM_REQ_CMP) {
 5608                 return (status);
 5609         }
 5610 
 5611 
 5612         /*
 5613          * Ensure we've waited at least a bus settle
 5614          * delay before attempting to probe the device.
 5615          * For HBAs that don't do bus resets, this won't make a difference.
 5616          */
 5617         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
 5618                                       scsi_delay);
 5619         probeschedule(periph);
 5620         return(CAM_REQ_CMP);
 5621 }
 5622 
 5623 static void
 5624 probeschedule(struct cam_periph *periph)
 5625 {
 5626         struct ccb_pathinq cpi;
 5627         union ccb *ccb;
 5628         probe_softc *softc;
 5629 
 5630         softc = (probe_softc *)periph->softc;
 5631         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 5632 
 5633         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
 5634         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5635         xpt_action((union ccb *)&cpi);
 5636 
 5637         /*
 5638          * If a device has gone away and another device, or the same one,
 5639          * is back in the same place, it should have a unit attention
 5640          * condition pending.  It will not report the unit attention in
 5641          * response to an inquiry, which may leave invalid transfer
 5642          * negotiations in effect.  The TUR will reveal the unit attention
 5643          * condition.  Only send the TUR for lun 0, since some devices
 5644          * will get confused by commands other than inquiry to non-existent
 5645          * luns.  If you think a device has gone away start your scan from
 5646          * lun 0.  This will ensure that any bogus transfer settings are
 5647          * invalidated.
 5648          *
 5649          * If we haven't seen the device before and the controller supports
 5650          * some kind of transfer negotiation, negotiate with the first
 5651          * sent command if no bus reset was performed at startup.  This
 5652          * ensures that the device is not confused by transfer negotiation
 5653          * settings left over by loader or BIOS action.
 5654          */
 5655         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 5656          && (ccb->ccb_h.target_lun == 0)) {
 5657                 softc->action = PROBE_TUR;
 5658         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
 5659               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
 5660                 proberequestdefaultnegotiation(periph);
 5661                 softc->action = PROBE_INQUIRY;
 5662         } else {
 5663                 softc->action = PROBE_INQUIRY;
 5664         }
 5665 
 5666         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
 5667                 softc->flags |= PROBE_NO_ANNOUNCE;
 5668         else
 5669                 softc->flags &= ~PROBE_NO_ANNOUNCE;
 5670 
 5671         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
 5672 }
 5673 
 5674 static void
 5675 probestart(struct cam_periph *periph, union ccb *start_ccb)
 5676 {
 5677         /* Probe the device that our peripheral driver points to */
 5678         struct ccb_scsiio *csio;
 5679         probe_softc *softc;
 5680 
 5681         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
 5682 
 5683         softc = (probe_softc *)periph->softc;
 5684         csio = &start_ccb->csio;
 5685 
 5686         switch (softc->action) {
 5687         case PROBE_TUR:
 5688         case PROBE_TUR_FOR_NEGOTIATION:
 5689         case PROBE_DV_EXIT:
 5690         {
 5691                 scsi_test_unit_ready(csio,
 5692                                      /*retries*/4,
 5693                                      probedone,
 5694                                      MSG_SIMPLE_Q_TAG,
 5695                                      SSD_FULL_SIZE,
 5696                                      /*timeout*/60000);
 5697                 break;
 5698         }
 5699         case PROBE_INQUIRY:
 5700         case PROBE_FULL_INQUIRY:
 5701         case PROBE_INQUIRY_BASIC_DV1:
 5702         case PROBE_INQUIRY_BASIC_DV2:
 5703         {
 5704                 u_int inquiry_len;
 5705                 struct scsi_inquiry_data *inq_buf;
 5706 
 5707                 inq_buf = &periph->path->device->inq_data;
 5708 
 5709                 /*
 5710                  * If the device is currently configured, we calculate an
 5711                  * MD5 checksum of the inquiry data, and if the serial number
 5712                  * length is greater than 0, add the serial number data
 5713                  * into the checksum as well.  Once the inquiry and the
 5714                  * serial number check finish, we attempt to figure out
 5715                  * whether we still have the same device.
 5716                  */
 5717                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
 5718 
 5719                         MD5Init(&softc->context);
 5720                         MD5Update(&softc->context, (unsigned char *)inq_buf,
 5721                                   sizeof(struct scsi_inquiry_data));
 5722                         softc->flags |= PROBE_INQUIRY_CKSUM;
 5723                         if (periph->path->device->serial_num_len > 0) {
 5724                                 MD5Update(&softc->context,
 5725                                           periph->path->device->serial_num,
 5726                                           periph->path->device->serial_num_len);
 5727                                 softc->flags |= PROBE_SERIAL_CKSUM;
 5728                         }
 5729                         MD5Final(softc->digest, &softc->context);
 5730                 }
 5731 
 5732                 if (softc->action == PROBE_INQUIRY)
 5733                         inquiry_len = SHORT_INQUIRY_LENGTH;
 5734                 else
 5735                         inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
 5736 
 5737                 /*
 5738                  * Some parallel SCSI devices fail to send an
 5739                  * ignore wide residue message when dealing with
 5740                  * odd length inquiry requests.  Round up to be
 5741                  * safe.
 5742                  */
 5743                 inquiry_len = roundup2(inquiry_len, 2);
 5744 
 5745                 if (softc->action == PROBE_INQUIRY_BASIC_DV1
 5746                  || softc->action == PROBE_INQUIRY_BASIC_DV2) {
 5747                         inq_buf = malloc(inquiry_len, M_CAMXPT, M_NOWAIT);
 5748                 }
 5749                 if (inq_buf == NULL) {
 5750                         xpt_print(periph->path, "malloc failure - skipping Basic "
 5751                             "Domain Validation\n");
 5752                         softc->action = PROBE_DV_EXIT;
 5753                         scsi_test_unit_ready(csio,
 5754                                              /*retries*/4,
 5755                                              probedone,
 5756                                              MSG_SIMPLE_Q_TAG,
 5757                                              SSD_FULL_SIZE,
 5758                                              /*timeout*/60000);
 5759                         break;
 5760                 }
 5761                 scsi_inquiry(csio,
 5762                              /*retries*/4,
 5763                              probedone,
 5764                              MSG_SIMPLE_Q_TAG,
 5765                              (u_int8_t *)inq_buf,
 5766                              inquiry_len,
 5767                              /*evpd*/FALSE,
 5768                              /*page_code*/0,
 5769                              SSD_MIN_SIZE,
 5770                              /*timeout*/60 * 1000);
 5771                 break;
 5772         }
 5773         case PROBE_MODE_SENSE:
 5774         {
 5775                 void  *mode_buf;
 5776                 int    mode_buf_len;
 5777 
 5778                 mode_buf_len = sizeof(struct scsi_mode_header_6)
 5779                              + sizeof(struct scsi_mode_blk_desc)
 5780                              + sizeof(struct scsi_control_page);
 5781                 mode_buf = malloc(mode_buf_len, M_CAMXPT, M_NOWAIT);
 5782                 if (mode_buf != NULL) {
 5783                         scsi_mode_sense(csio,
 5784                                         /*retries*/4,
 5785                                         probedone,
 5786                                         MSG_SIMPLE_Q_TAG,
 5787                                         /*dbd*/FALSE,
 5788                                         SMS_PAGE_CTRL_CURRENT,
 5789                                         SMS_CONTROL_MODE_PAGE,
 5790                                         mode_buf,
 5791                                         mode_buf_len,
 5792                                         SSD_FULL_SIZE,
 5793                                         /*timeout*/60000);
 5794                         break;
 5795                 }
 5796                 xpt_print(periph->path, "Unable to mode sense control page - "
 5797                     "malloc failure\n");
 5798                 softc->action = PROBE_SERIAL_NUM_0;
 5799         }
 5800         /* FALLTHROUGH */
 5801         case PROBE_SERIAL_NUM_0:
 5802         {
 5803                 struct scsi_vpd_supported_page_list *vpd_list = NULL;
 5804                 struct cam_ed *device;
 5805 
 5806                 device = periph->path->device;
 5807                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
 5808                         vpd_list = malloc(sizeof(*vpd_list), M_CAMXPT,
 5809                             M_NOWAIT | M_ZERO);
 5810                 }
 5811 
 5812                 if (vpd_list != NULL) {
 5813                         scsi_inquiry(csio,
 5814                                      /*retries*/4,
 5815                                      probedone,
 5816                                      MSG_SIMPLE_Q_TAG,
 5817                                      (u_int8_t *)vpd_list,
 5818                                      sizeof(*vpd_list),
 5819                                      /*evpd*/TRUE,
 5820                                      SVPD_SUPPORTED_PAGE_LIST,
 5821                                      SSD_MIN_SIZE,
 5822                                      /*timeout*/60 * 1000);
 5823                         break;
 5824                 }
 5825                 /*
 5826                  * We'll have to do without; let our probedone
 5827                  * routine finish up for us.
 5828                  */
 5829                 start_ccb->csio.data_ptr = NULL;
 5830                 probedone(periph, start_ccb);
 5831                 return;
 5832         }
 5833         case PROBE_SERIAL_NUM_1:
 5834         {
 5835                 struct scsi_vpd_unit_serial_number *serial_buf;
 5836                 struct cam_ed* device;
 5837 
 5838                 serial_buf = NULL;
 5839                 device = periph->path->device;
 5840                 device->serial_num = NULL;
 5841                 device->serial_num_len = 0;
 5842 
 5843                 serial_buf = (struct scsi_vpd_unit_serial_number *)
 5844                         malloc(sizeof(*serial_buf), M_CAMXPT, M_NOWAIT|M_ZERO);
 5845 
 5846                 if (serial_buf != NULL) {
 5847                         scsi_inquiry(csio,
 5848                                      /*retries*/4,
 5849                                      probedone,
 5850                                      MSG_SIMPLE_Q_TAG,
 5851                                      (u_int8_t *)serial_buf,
 5852                                      sizeof(*serial_buf),
 5853                                      /*evpd*/TRUE,
 5854                                      SVPD_UNIT_SERIAL_NUMBER,
 5855                                      SSD_MIN_SIZE,
 5856                                      /*timeout*/60 * 1000);
 5857                         break;
 5858                 }
 5859                 /*
 5860                  * We'll have to do without; let our probedone
 5861                  * routine finish up for us.
 5862                  */
 5863                 start_ccb->csio.data_ptr = NULL;
 5864                 probedone(periph, start_ccb);
 5865                 return;
 5866         }
 5867         }
 5868         xpt_action(start_ccb);
 5869 }
 5870 
 5871 static void
 5872 proberequestdefaultnegotiation(struct cam_periph *periph)
 5873 {
 5874         struct ccb_trans_settings cts;
 5875 
 5876         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
 5877         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 5878         cts.type = CTS_TYPE_USER_SETTINGS;
 5879         xpt_action((union ccb *)&cts);
 5880         if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 5881                 return;
 5882         }
 5883         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 5884         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 5885         xpt_action((union ccb *)&cts);
 5886 }
 5887 
 5888 /*
 5889  * Backoff Negotiation Code - only pertinent to SPI devices.
 5890  */
 5891 static int
 5892 proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
 5893 {
 5894         struct ccb_trans_settings cts;
 5895         struct ccb_trans_settings_spi *spi;
 5896 
 5897         memset(&cts, 0, sizeof (cts));
 5898         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
 5899         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 5900         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 5901         xpt_action((union ccb *)&cts);
 5902         if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 5903                 if (bootverbose) {
 5904                         xpt_print(periph->path,
 5905                             "failed to get current device settings\n");
 5906                 }
 5907                 return (0);
 5908         }
 5909         if (cts.transport != XPORT_SPI) {
 5910                 if (bootverbose) {
 5911                         xpt_print(periph->path, "not SPI transport\n");
 5912                 }
 5913                 return (0);
 5914         }
 5915         spi = &cts.xport_specific.spi;
 5916 
 5917         /*
 5918          * We cannot renegotiate sync rate if we don't have one.
 5919          */
 5920         if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
 5921                 if (bootverbose) {
 5922                         xpt_print(periph->path, "no sync rate known\n");
 5923                 }
 5924                 return (0);
 5925         }
 5926 
 5927         /*
 5928          * We'll assert that we don't have to touch PPR options; the
 5929          * SIM will see what we do with period and offset and adjust
 5930          * the PPR options as appropriate.
 5931          */
 5932 
 5933         /*
 5934          * A sync rate with unknown or zero offset is nonsensical.
 5935          * A sync period of zero means Async.
 5936          */
 5937         if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
 5938          || spi->sync_offset == 0 || spi->sync_period == 0) {
 5939                 if (bootverbose) {
 5940                         xpt_print(periph->path, "no sync rate available\n");
 5941                 }
 5942                 return (0);
 5943         }
 5944 
 5945         if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
 5946                 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 5947                     ("hit async: giving up on DV\n"));
 5948                 return (0);
 5949         }
 5950 
 5951 
 5952         /*
 5953          * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
 5954          * We don't try to remember 'last' settings to see if the SIM actually
 5955          * gets into the speed we want to set.  We rely on the SIM telling
 5956          * us that a requested speed is bad, but otherwise don't try to
 5957          * check the speed due to the asynchronous and handshake nature
 5958          * of speed setting.
 5959          */
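        /*
         * For example (hypothetical device), a target that negotiated
         * at period factor 0x0c would be retried at 0x0d and 0x0e; at
         * 0x0f the settings collapse to async (period and offset 0)
         * and CAM_DEV_DV_HIT_BOTTOM stops any further backoff.
         */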
 5960         spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
 5961         for (;;) {
 5962                 spi->sync_period++;
 5963                 if (spi->sync_period >= 0xf) {
 5964                         spi->sync_period = 0;
 5965                         spi->sync_offset = 0;
 5966                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 5967                             ("setting to async for DV\n"));
 5968                         /*
 5969                          * Once we hit async, we don't want to try
 5970                          * any more settings.
 5971                          */
 5972                         device->flags |= CAM_DEV_DV_HIT_BOTTOM;
 5973                 } else if (bootverbose) {
 5974                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 5975                             ("DV: period 0x%x\n", spi->sync_period));
 5976                         printf("setting period to 0x%x\n", spi->sync_period);
 5977                 }
 5978                 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 5979                 cts.type = CTS_TYPE_CURRENT_SETTINGS;
 5980                 xpt_action((union ccb *)&cts);
 5981                 if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 5982                         break;
 5983                 }
 5984                 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 5985                     ("DV: failed to set period 0x%x\n", spi->sync_period));
 5986                 if (spi->sync_period == 0) {
 5987                         return (0);
 5988                 }
 5989         }
 5990         return (1);
 5991 }
 5992 
 5993 static void
 5994 probedone(struct cam_periph *periph, union ccb *done_ccb)
 5995 {
 5996         probe_softc *softc;
 5997         struct cam_path *path;
 5998         u_int32_t  priority;
 5999 
 6000         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
 6001 
 6002         softc = (probe_softc *)periph->softc;
 6003         path = done_ccb->ccb_h.path;
 6004         priority = done_ccb->ccb_h.pinfo.priority;
 6005 
 6006         switch (softc->action) {
 6007         case PROBE_TUR:
 6008         {
 6009                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 6010 
 6011                         if (cam_periph_error(done_ccb, 0,
 6012                                              SF_NO_PRINT, NULL) == ERESTART)
 6013                                 return;
 6014                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 6015                                 /* Don't wedge the queue */
 6016                                 xpt_release_devq(done_ccb->ccb_h.path,
 6017                                                  /*count*/1,
 6018                                                  /*run_queue*/TRUE);
 6019                 }
 6020                 softc->action = PROBE_INQUIRY;
 6021                 xpt_release_ccb(done_ccb);
 6022                 xpt_schedule(periph, priority);
 6023                 return;
 6024         }
 6025         case PROBE_INQUIRY:
 6026         case PROBE_FULL_INQUIRY:
 6027         {
 6028                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 6029                         struct scsi_inquiry_data *inq_buf;
 6030                         u_int8_t periph_qual;
 6031 
 6032                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
 6033                         inq_buf = &path->device->inq_data;
 6034 
 6035                         periph_qual = SID_QUAL(inq_buf);
 6036 
 6037                         switch(periph_qual) {
 6038                         case SID_QUAL_LU_CONNECTED:
 6039                         {
 6040                                 u_int8_t len;
 6041 
 6042                                 /*
 6043                                  * We conservatively request only
 6044                                  * SHORT_INQUIRY_LENGTH bytes of inquiry
 6045                                  * information during our first try
 6046                                  * at sending an INQUIRY. If the device
 6047                                  * has more information to give,
 6048                                  * perform a second request specifying
 6049                                  * the amount of information the device
 6050                                  * is willing to give.
 6051                                  */
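                                /*
                                 * For example, a device reporting
                                 * additional_length 0xB1 (177) yields
                                 * len = 177 + 4 + 1 = 182 bytes, since
                                 * additional_length lives at byte
                                 * offset 4 of scsi_inquiry_data.
                                 */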
 6052                                 len = inq_buf->additional_length
 6053                                     + offsetof(struct scsi_inquiry_data,
 6054                                                additional_length) + 1;
 6055                                 if (softc->action == PROBE_INQUIRY
 6056                                     && len > SHORT_INQUIRY_LENGTH) {
 6057                                         softc->action = PROBE_FULL_INQUIRY;
 6058                                         xpt_release_ccb(done_ccb);
 6059                                         xpt_schedule(periph, priority);
 6060                                         return;
 6061                                 }
 6062 
 6063                                 xpt_find_quirk(path->device);
 6064 
 6065                                 xpt_devise_transport(path);
 6066                                 if (INQ_DATA_TQ_ENABLED(inq_buf))
 6067                                         softc->action = PROBE_MODE_SENSE;
 6068                                 else
 6069                                         softc->action = PROBE_SERIAL_NUM_0;
 6070 
 6071                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
 6072 
 6073                                 xpt_release_ccb(done_ccb);
 6074                                 xpt_schedule(periph, priority);
 6075                                 return;
 6076                         }
 6077                         default:
 6078                                 break;
 6079                         }
 6080                 } else if (cam_periph_error(done_ccb, 0,
 6081                                             done_ccb->ccb_h.target_lun > 0
 6082                                             ? SF_RETRY_UA|SF_QUIET_IR
 6083                                             : SF_RETRY_UA,
 6084                                             &softc->saved_ccb) == ERESTART) {
 6085                         return;
 6086                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6087                         /* Don't wedge the queue */
 6088                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6089                                          /*run_queue*/TRUE);
 6090                 }
 6091                 /*
 6092                  * If we get to this point, we got an error status back
 6093                  * from the inquiry and the error status doesn't require
 6094                  * automatically retrying the command.  Therefore, the
 6095                  * inquiry failed.  If we had inquiry information before
 6096                  * for this device, but this latest inquiry command failed,
 6097                  * the device has probably gone away.  If this device isn't
 6098                  * already marked unconfigured, notify the peripheral
 6099                  * drivers that this device is no more.
 6100                  */
 6101                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 6102                         /* Send the async notification. */
 6103                         xpt_async(AC_LOST_DEVICE, path, NULL);
 6104 
 6105                 xpt_release_ccb(done_ccb);
 6106                 break;
 6107         }
 6108         case PROBE_MODE_SENSE:
 6109         {
 6110                 struct ccb_scsiio *csio;
 6111                 struct scsi_mode_header_6 *mode_hdr;
 6112 
 6113                 csio = &done_ccb->csio;
 6114                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
 6115                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 6116                         struct scsi_control_page *page;
 6117                         u_int8_t *offset;
 6118 
 6119                         offset = ((u_int8_t *)&mode_hdr[1])
 6120                             + mode_hdr->blk_desc_len;
 6121                         page = (struct scsi_control_page *)offset;
 6122                         path->device->queue_flags = page->queue_flags;
 6123                 } else if (cam_periph_error(done_ccb, 0,
 6124                                             SF_RETRY_UA|SF_NO_PRINT,
 6125                                             &softc->saved_ccb) == ERESTART) {
 6126                         return;
 6127                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6128                         /* Don't wedge the queue */
 6129                         xpt_release_devq(done_ccb->ccb_h.path,
 6130                                          /*count*/1, /*run_queue*/TRUE);
 6131                 }
 6132                 xpt_release_ccb(done_ccb);
 6133                 free(mode_hdr, M_CAMXPT);
 6134                 softc->action = PROBE_SERIAL_NUM_0;
 6135                 xpt_schedule(periph, priority);
 6136                 return;
 6137         }
 6138         case PROBE_SERIAL_NUM_0:
 6139         {
 6140                 struct ccb_scsiio *csio;
 6141                 struct scsi_vpd_supported_page_list *page_list;
 6142                 int length, serialnum_supported, i;
 6143 
 6144                 serialnum_supported = 0;
 6145                 csio = &done_ccb->csio;
 6146                 page_list =
 6147                     (struct scsi_vpd_supported_page_list *)csio->data_ptr;
 6148 
 6149                 if (page_list == NULL) {
 6150                         /*
 6151                          * Don't process the command as it was never sent
 6152                          */
 6153                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
 6154                     && (page_list->length > 0)) {
 6155                         length = min(page_list->length,
 6156                             SVPD_SUPPORTED_PAGES_SIZE);
 6157                         for (i = 0; i < length; i++) {
 6158                                 if (page_list->list[i] ==
 6159                                     SVPD_UNIT_SERIAL_NUMBER) {
 6160                                         serialnum_supported = 1;
 6161                                         break;
 6162                                 }
 6163                         }
 6164                 } else if (cam_periph_error(done_ccb, 0,
 6165                                             SF_RETRY_UA|SF_NO_PRINT,
 6166                                             &softc->saved_ccb) == ERESTART) {
 6167                         return;
 6168                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6169                         /* Don't wedge the queue */
 6170                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6171                                          /*run_queue*/TRUE);
 6172                 }
 6173 
 6174                 if (page_list != NULL)
 6175                         free(page_list, M_CAMXPT);
 6176 
 6177                 if (serialnum_supported) {
 6178                         xpt_release_ccb(done_ccb);
 6179                         softc->action = PROBE_SERIAL_NUM_1;
 6180                         xpt_schedule(periph, priority);
 6181                         return;
 6182                 }
 6183                 xpt_release_ccb(done_ccb);
 6184                 softc->action = PROBE_TUR_FOR_NEGOTIATION;
 6185                 xpt_schedule(periph, priority);
 6186                 return;
 6187         }
 6188 
 6189         case PROBE_SERIAL_NUM_1:
 6190         {
 6191                 struct ccb_scsiio *csio;
 6192                 struct scsi_vpd_unit_serial_number *serial_buf;
 6193                 u_int32_t  priority;
 6194                 int changed;
 6195                 int have_serialnum;
 6196 
 6197                 changed = 1;
 6198                 have_serialnum = 0;
 6199                 csio = &done_ccb->csio;
 6200                 priority = done_ccb->ccb_h.pinfo.priority;
 6201                 serial_buf =
 6202                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
 6203 
 6204                 /* Clean up from previous instance of this device */
 6205                 if (path->device->serial_num != NULL) {
 6206                         free(path->device->serial_num, M_CAMXPT);
 6207                         path->device->serial_num = NULL;
 6208                         path->device->serial_num_len = 0;
 6209                 }
 6210 
 6211                 if (serial_buf == NULL) {
 6212                         /*
 6213                          * Don't process the command as it was never sent
 6214                          */
 6215                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
 6216                         && (serial_buf->length > 0)) {
 6217 
 6218                         have_serialnum = 1;
 6219                         path->device->serial_num =
 6220                                 (u_int8_t *)malloc((serial_buf->length + 1),
 6221                                                    M_CAMXPT, M_NOWAIT);
 6222                         if (path->device->serial_num != NULL) {
 6223                                 bcopy(serial_buf->serial_num,
 6224                                       path->device->serial_num,
 6225                                       serial_buf->length);
 6226                                 path->device->serial_num_len =
 6227                                     serial_buf->length;
 6228                                 path->device->serial_num[serial_buf->length]
 6229                                     = '\0';
 6230                         }
 6231                 } else if (cam_periph_error(done_ccb, 0,
 6232                                             SF_RETRY_UA|SF_NO_PRINT,
 6233                                             &softc->saved_ccb) == ERESTART) {
 6234                         return;
 6235                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6236                         /* Don't wedge the queue */
 6237                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6238                                          /*run_queue*/TRUE);
 6239                 }
 6240 
 6241                 /*
 6242                  * Let's see if we have seen this device before.
 6243                  */
 6244                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
 6245                         MD5_CTX context;
 6246                         u_int8_t digest[16];
 6247 
 6248                         MD5Init(&context);
 6249 
 6250                         MD5Update(&context,
 6251                                   (unsigned char *)&path->device->inq_data,
 6252                                   sizeof(struct scsi_inquiry_data));
 6253 
 6254                         if (have_serialnum)
 6255                                 MD5Update(&context, serial_buf->serial_num,
 6256                                           serial_buf->length);
 6257 
 6258                         MD5Final(digest, &context);
 6259                         if (bcmp(softc->digest, digest, 16) == 0)
 6260                                 changed = 0;
 6261 
 6262                         /*
 6263                          * XXX Do we need to do a TUR in order to ensure
 6264                          *     that the device really hasn't changed???
 6265                          */
 6266                         if ((changed != 0)
 6267                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
 6268                                 xpt_async(AC_LOST_DEVICE, path, NULL);
 6269                 }
 6270                 if (serial_buf != NULL)
 6271                         free(serial_buf, M_CAMXPT);
 6272 
 6273                 if (changed != 0) {
 6274                         /*
 6275                          * We now have all the information needed to
 6276                          * safely perform transfer negotiation.
 6277                          * Controllers don't perform any negotiation or
 6278                          * tagged queuing until after the first
 6279                          * XPT_SET_TRAN_SETTINGS ccb is received, so on
 6280                          * a new device we just retrieve the user
 6281                          * settings and install them as the current
 6282                          * settings to set the device up.
 6283                          */
 6284                         proberequestdefaultnegotiation(periph);
 6285                         xpt_release_ccb(done_ccb);
 6286 
 6287                         /*
 6288                          * Perform a TUR to allow the controller to
 6289                          * perform any necessary transfer negotiation.
 6290                          */
 6291                         softc->action = PROBE_TUR_FOR_NEGOTIATION;
 6292                         xpt_schedule(periph, priority);
 6293                         return;
 6294                 }
 6295                 xpt_release_ccb(done_ccb);
 6296                 break;
 6297         }
 6298         case PROBE_TUR_FOR_NEGOTIATION:
 6299         case PROBE_DV_EXIT:
 6300                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6301                         /* Don't wedge the queue */
 6302                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6303                                          /*run_queue*/TRUE);
 6304                 }
 6305                 /*
 6306                  * Do Domain Validation for lun 0 on devices that claim
 6307                  * to support Synchronous Transfer modes.
 6308                  */
 6309                 if (softc->action == PROBE_TUR_FOR_NEGOTIATION
 6310                  && done_ccb->ccb_h.target_lun == 0
 6311                  && (path->device->inq_data.flags & SID_Sync) != 0
 6312                  && (path->device->flags & CAM_DEV_IN_DV) == 0) {
 6313                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 6314                             ("Begin Domain Validation\n"));
 6315                         path->device->flags |= CAM_DEV_IN_DV;
 6316                         xpt_release_ccb(done_ccb);
 6317                         softc->action = PROBE_INQUIRY_BASIC_DV1;
 6318                         xpt_schedule(periph, priority);
 6319                         return;
 6320                 }
 6321                 if (softc->action == PROBE_DV_EXIT) {
 6322                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 6323                             ("Leave Domain Validation\n"));
 6324                 }
 6325                 path->device->flags &=
 6326                     ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
 6327                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
 6328                         /* Inform the XPT that a new device has been found */
 6329                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 6330                         xpt_action(done_ccb);
 6331                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
 6332                                   done_ccb);
 6333                 }
 6334                 xpt_release_ccb(done_ccb);
 6335                 break;
 6336         case PROBE_INQUIRY_BASIC_DV1:
 6337         case PROBE_INQUIRY_BASIC_DV2:
 6338         {
 6339                 struct scsi_inquiry_data *nbuf;
 6340                 struct ccb_scsiio *csio;
 6341 
 6342                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6343                         /* Don't wedge the queue */
 6344                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6345                                          /*run_queue*/TRUE);
 6346                 }
 6347                 csio = &done_ccb->csio;
 6348                 nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
 6349                 if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
 6350                         xpt_print(path,
 6351                             "inquiry data fails comparison at DV%d step\n",
 6352                             softc->action == PROBE_INQUIRY_BASIC_DV1 ? 1 : 2);
 6353                         if (proberequestbackoff(periph, path->device)) {
 6354                                 path->device->flags &= ~CAM_DEV_IN_DV;
 6355                                 softc->action = PROBE_TUR_FOR_NEGOTIATION;
 6356                         } else {
 6357                                 /* give up */
 6358                                 softc->action = PROBE_DV_EXIT;
 6359                         }
 6360                         free(nbuf, M_CAMXPT);
 6361                         xpt_release_ccb(done_ccb);
 6362                         xpt_schedule(periph, priority);
 6363                         return;
 6364                 }
 6365                 free(nbuf, M_CAMXPT);
 6366                 if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
 6367                         softc->action = PROBE_INQUIRY_BASIC_DV2;
 6368                         xpt_release_ccb(done_ccb);
 6369                         xpt_schedule(periph, priority);
 6370                         return;
 6371                 }
 6372                 if (softc->action == PROBE_INQUIRY_BASIC_DV2) {
 6373                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 6374                             ("Leave Domain Validation Successfully\n"));
 6375                 }
 6376                 path->device->flags &=
 6377                     ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
 6378                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
 6379                         /* Inform the XPT that a new device has been found */
 6380                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 6381                         xpt_action(done_ccb);
 6382                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
 6383                                   done_ccb);
 6384                 }
 6385                 xpt_release_ccb(done_ccb);
 6386                 break;
 6387         }
 6388         }
 6389         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 6390         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
 6391         done_ccb->ccb_h.status = CAM_REQ_CMP;
 6392         xpt_done(done_ccb);
 6393         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
 6394                 cam_periph_invalidate(periph);
 6395                 cam_periph_release(periph);
 6396         } else {
 6397                 probeschedule(periph);
 6398         }
 6399 }
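/*
 * The tail of probedone() above drains one queued probe request per
 * completed pass: pop the head of softc->request_ccbs, complete it, and
 * either tear the probe periph down (queue empty) or schedule another
 * pass.  A minimal sketch of the same drain-one-per-completion TAILQ
 * idiom, using hypothetical request/queue types rather than the CCB
 * structures above:
 *
 *	struct req *r = TAILQ_FIRST(&q->pending);   // oldest request
 *	TAILQ_REMOVE(&q->pending, r, links);        // detach before completing
 *	req_complete(r);                            // hand result to caller
 *	if (TAILQ_FIRST(&q->pending) == NULL)
 *		q_teardown(q);                      // last request: clean up
 *	else
 *		q_reschedule(q);                    // more work: go again
 */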
 6400 
 6401 static void
 6402 probecleanup(struct cam_periph *periph)
 6403 {
 6404         free(periph->softc, M_CAMXPT);
 6405 }
 6406 
 6407 static void
 6408 xpt_find_quirk(struct cam_ed *device)
 6409 {
 6410         caddr_t match;
 6411 
 6412         match = cam_quirkmatch((caddr_t)&device->inq_data,
 6413                                (caddr_t)xpt_quirk_table,
 6414                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
 6415                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
 6416 
 6417         if (match == NULL)
 6418                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
 6419 
 6420         device->quirk = (struct xpt_quirk_entry *)match;
 6421 }
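/*
 * xpt_find_quirk() assumes the quirk table declared earlier in this file
 * ends in a catch-all wildcard entry, which is why a NULL match is a
 * panic-worthy table bug rather than a runtime condition.  As a hedged
 * sketch (field order follows struct xpt_quirk_entry as used here; the
 * vendor/product strings in the first entry are hypothetical), table
 * entries pair an inquiry pattern with quirk flags and tag limits:
 *
 *	{
 *		// Hypothetical device-specific entry: skip LUN probing.
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "EXAMPLE*", "*" },
 *		CAM_QUIRK_NOLUNS, 2, 255
 *	},
 *	{
 *		// Catch-all: matches any device, so lookups never fail.
 *		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "*", "*", "*" },
 *		0, 2, 255
 *	},
 */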
 6422 
 6423 static int
 6424 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
 6425 {
 6426         int error, val;
 6427 
 6428         val = cam_srch_hi;
 6429         error = sysctl_handle_int(oidp, &val, 0, req);
 6430         if (error != 0 || req->newptr == NULL)
 6431                 return (error);
 6432         if (val == 0 || val == 1) {
 6433                 cam_srch_hi = val;
 6434                 return (0);
 6435         } else {
 6436                 return (EINVAL);
 6437         }
 6438 }
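/*
 * The handler above follows the standard sysctl(9) read-modify-validate
 * pattern: copy the current value into a local, let sysctl_handle_int()
 * do the userland I/O, and commit only after validating the new value.
 * A hedged sketch of how such a handler is typically registered (the
 * OID name and description here are illustrative; the actual
 * SYSCTL_PROC for cam_srch_hi lives elsewhere in this file and may
 * differ):
 *
 *	SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi,
 *	    CTLTYPE_INT | CTLFLAG_RW, 0, 0, sysctl_cam_search_luns, "I",
 *	    "allow search above LUN 7 for SCSI3 and greater devices");
 */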
 6439 
 6440 
 6441 static void
 6442 xpt_devise_transport(struct cam_path *path)
 6443 {
 6444         struct ccb_pathinq cpi;
 6445         struct ccb_trans_settings cts;
 6446         struct scsi_inquiry_data *inq_buf;
 6447 
 6448         /* Get transport information from the SIM */
 6449         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 6450         cpi.ccb_h.func_code = XPT_PATH_INQ;
 6451         xpt_action((union ccb *)&cpi);
 6452 
 6453         inq_buf = NULL;
 6454         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
 6455                 inq_buf = &path->device->inq_data;
 6456         path->device->protocol = PROTO_SCSI;
 6457         path->device->protocol_version =
 6458             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
 6459         path->device->transport = cpi.transport;
 6460         path->device->transport_version = cpi.transport_version;
 6461 
 6462         /*
 6463          * Any device not using SPI3 features should
 6464          * be considered SPI2 or lower.
 6465          */
 6466         if (inq_buf != NULL) {
 6467                 if (path->device->transport == XPORT_SPI
 6468                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
 6469                  && path->device->transport_version > 2)
 6470                         path->device->transport_version = 2;
 6471         } else {
 6472                 struct cam_ed* otherdev;
 6473 
 6474                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
 6475                      otherdev != NULL;
 6476                      otherdev = TAILQ_NEXT(otherdev, links)) {
 6477                         if (otherdev != path->device)
 6478                                 break;
 6479                 }
 6480 
 6481                 if (otherdev != NULL) {
 6482                         /*
 6483                          * Initially assume the same versioning as
 6484                          * prior luns for this target.
 6485                          */
 6486                         path->device->protocol_version =
 6487                             otherdev->protocol_version;
 6488                         path->device->transport_version =
 6489                             otherdev->transport_version;
 6490                 } else {
 6491                         /* Until we know better, opt for safety */
 6492                         path->device->protocol_version = 2;
 6493                         if (path->device->transport == XPORT_SPI)
 6494                                 path->device->transport_version = 2;
 6495                         else
 6496                                 path->device->transport_version = 0;
 6497                 }
 6498         }
 6499 
 6500         /*
 6501          * XXX
 6502          * For a device compliant with SPC-2 we should be able
 6503          * to determine the transport version supported by
 6504          * scrutinizing the version descriptors in the
 6505          * inquiry buffer.
 6506          */
 6507 
 6508         /* Tell the controller what we think */
 6509         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 6510         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 6511         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 6512         cts.transport = path->device->transport;
 6513         cts.transport_version = path->device->transport_version;
 6514         cts.protocol = path->device->protocol;
 6515         cts.protocol_version = path->device->protocol_version;
 6516         cts.proto_specific.valid = 0;
 6517         cts.xport_specific.valid = 0;
 6518         xpt_action((union ccb *)&cts);
 6519 }
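/*
 * Once xpt_devise_transport() has pushed its initial guess to the SIM,
 * the settings it chose can be read back with the complementary
 * XPT_GET_TRAN_SETTINGS request.  A minimal usage sketch (standard CAM
 * practice, not a call made at this point in this file):
 *
 *	struct ccb_trans_settings cts;
 *
 *	xpt_setup_ccb(&cts.ccb_h, path, 1);         // priority 1, as above
 *	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 *	cts.type = CTS_TYPE_CURRENT_SETTINGS;
 *	xpt_action((union ccb *)&cts);
 *	if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 *		// cts.transport_version now reflects the SIM's view.
 *	}
 */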
 6520 
 6521 static void
 6522 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
 6523                           int async_update)
 6524 {
 6525         struct  ccb_pathinq cpi;
 6526         struct  ccb_trans_settings cur_cts;
 6527         struct  ccb_trans_settings_scsi *scsi;
 6528         struct  ccb_trans_settings_scsi *cur_scsi;
 6529         struct  cam_sim *sim;
 6530         struct  scsi_inquiry_data *inq_data;
 6531 
 6532         if (device == NULL) {
 6533                 cts->ccb_h.status = CAM_PATH_INVALID;
 6534                 xpt_done((union ccb *)cts);
 6535                 return;
 6536         }
 6537 
 6538         if (cts->protocol == PROTO_UNKNOWN
 6539          || cts->protocol == PROTO_UNSPECIFIED) {
 6540                 cts->protocol = device->protocol;
 6541                 cts->protocol_version = device->protocol_version;
 6542         }
 6543 
 6544         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
 6545          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
 6546                 cts->protocol_version = device->protocol_version;
 6547 
 6548         if (cts->protocol != device->protocol) {
 6549                 xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
 6550                        cts->protocol, device->protocol);
 6551                 cts->protocol = device->protocol;
 6552         }
 6553 
 6554         if (cts->protocol_version > device->protocol_version) {
 6555                 if (bootverbose) {
 6556                         xpt_print(cts->ccb_h.path, "Down revving Protocol "
 6557                             "Version from %d to %d?\n", cts->protocol_version,
 6558                             device->protocol_version);
 6559                 }
 6560                 cts->protocol_version = device->protocol_version;
 6561         }
 6562 
 6563         if (cts->transport == XPORT_UNKNOWN
 6564          || cts->transport == XPORT_UNSPECIFIED) {
 6565                 cts->transport = device->transport;
 6566                 cts->transport_version = device->transport_version;
 6567         }
 6568 
 6569         if (cts->transport_version == XPORT_VERSION_UNKNOWN
 6570          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
 6571                 cts->transport_version = device->transport_version;
 6572 
 6573         if (cts->transport != device->transport) {
 6574                 xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
 6575                     cts->transport, device->transport);
 6576                 cts->transport = device->transport;
 6577         }
 6578 
 6579         if (cts->transport_version > device->transport_version) {
 6580                 if (bootverbose) {
 6581                         xpt_print(cts->ccb_h.path, "Down revving Transport "
 6582                             "Version from %d to %d?\n", cts->transport_version,
 6583                             device->transport_version);
 6584                 }
 6585                 cts->transport_version = device->transport_version;
 6586         }
 6587 
 6588         sim = cts->ccb_h.path->bus->sim;
 6589 
 6590         /*
 6591          * Nothing more of interest to do unless
 6592          * this is a device connected via the
 6593          * SCSI protocol.
 6594          */
 6595         if (cts->protocol != PROTO_SCSI) {
 6596                 if (async_update == FALSE)
 6597                         (*(sim->sim_action))(sim, (union ccb *)cts);
 6598                 return;
 6599         }
 6600 
 6601         inq_data = &device->inq_data;
 6602         scsi = &cts->proto_specific.scsi;
 6603         xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
 6604         cpi.ccb_h.func_code = XPT_PATH_INQ;
 6605         xpt_action((union ccb *)&cpi);
 6606 
 6607         /* SCSI specific sanity checking */
 6608         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
 6609          || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
 6610          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
 6611          || (device->quirk->mintags == 0)) {
 6612                 /*
 6613                  * Can't tag on hardware that doesn't support tags,
 6614                  * doesn't have it enabled, or has broken tag support.
 6615                  */
 6616                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6617         }
 6618 
 6619         if (async_update == FALSE) {
 6620                 /*
 6621                  * Perform sanity checking against what the
 6622                  * controller and device can do.
 6623                  */
 6624                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
 6625                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 6626                 cur_cts.type = cts->type;
 6627                 xpt_action((union ccb *)&cur_cts);
 6628                 if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 6629                         return;
 6630                 }
 6631                 cur_scsi = &cur_cts.proto_specific.scsi;
 6632                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
 6633                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6634                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
 6635                 }
 6636                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
 6637                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6638         }
 6639 
 6640         /* SPI specific sanity checking */
 6641         if (cts->transport == XPORT_SPI && async_update == FALSE) {
 6642                 u_int spi3caps;
 6643                 struct ccb_trans_settings_spi *spi;
 6644                 struct ccb_trans_settings_spi *cur_spi;
 6645 
 6646                 spi = &cts->xport_specific.spi;
 6647 
 6648                 cur_spi = &cur_cts.xport_specific.spi;
 6649 
 6650                 /* Fill in any gaps in what the user gave us */
 6651                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
 6652                         spi->sync_period = cur_spi->sync_period;
 6653                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
 6654                         spi->sync_period = 0;
 6655                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
 6656                         spi->sync_offset = cur_spi->sync_offset;
 6657                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
 6658                         spi->sync_offset = 0;
 6659                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
 6660                         spi->ppr_options = cur_spi->ppr_options;
 6661                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
 6662                         spi->ppr_options = 0;
 6663                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
 6664                         spi->bus_width = cur_spi->bus_width;
 6665                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
 6666                         spi->bus_width = 0;
 6667                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
 6668                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
 6669                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
 6670                 }
 6671                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
 6672                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
 6673                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6674                   && (inq_data->flags & SID_Sync) == 0
 6675                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
 6676                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
 6677                  || (spi->sync_offset == 0)
 6678                  || (spi->sync_period == 0)) {
 6679                         /* Force async */
 6680                         spi->sync_period = 0;
 6681                         spi->sync_offset = 0;
 6682                 }
 6683 
 6684                 switch (spi->bus_width) {
 6685                 case MSG_EXT_WDTR_BUS_32_BIT:
 6686                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6687                           || (inq_data->flags & SID_WBus32) != 0
 6688                           || cts->type == CTS_TYPE_USER_SETTINGS)
 6689                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
 6690                                 break;
 6691                         /* Fall Through to 16-bit */
 6692                 case MSG_EXT_WDTR_BUS_16_BIT:
 6693                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6694                           || (inq_data->flags & SID_WBus16) != 0
 6695                           || cts->type == CTS_TYPE_USER_SETTINGS)
 6696                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
 6697                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
 6698                                 break;
 6699                         }
 6700                         /* Fall Through to 8-bit */
 6701                 default: /* New bus width?? */
 6702                 case MSG_EXT_WDTR_BUS_8_BIT:
 6703                         /* All targets can do this */
 6704                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
 6705                         break;
 6706                 }
 6707 
 6708                 spi3caps = cpi.xport_specific.spi.ppr_options;
 6709                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6710                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
 6711                         spi3caps &= inq_data->spi3data;
 6712 
 6713                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
 6714                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
 6715 
 6716                 if ((spi3caps & SID_SPI_IUS) == 0)
 6717                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
 6718 
 6719                 if ((spi3caps & SID_SPI_QAS) == 0)
 6720                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
 6721 
 6722                 /* PPR options require a wide bus; clear them when narrow */
 6723                 if (spi->bus_width == 0)
 6724                         spi->ppr_options = 0;
 6725 
 6726                 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
 6727                         /*
 6728                          * Can't tag queue without disconnection.
 6729                          */
 6730                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6731                         scsi->valid |= CTS_SCSI_VALID_TQ;
 6732                 }
 6733 
 6734                 /*
 6735                  * If we are currently performing tagged transactions to
 6736                  * this device and want to change its negotiation parameters,
 6737                  * go non-tagged for a bit to give the controller a chance to
 6738                  * negotiate unhampered by tag messages.
 6739                  */
 6740                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
 6741                  && (device->inq_flags & SID_CmdQue) != 0
 6742                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
 6743                  && (spi->valid & (CTS_SPI_VALID_SYNC_RATE|
 6744                                    CTS_SPI_VALID_SYNC_OFFSET|
 6745                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
 6746                         xpt_toggle_tags(cts->ccb_h.path);
 6747         }
 6748 
 6749         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
 6750          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
 6751                 int device_tagenb;
 6752 
 6753                 /*
 6754                  * If we are transitioning from tags to no-tags or
 6755                  * vice-versa, we need to carefully freeze and restart
 6756                  * the queue so that we don't overlap tagged and non-tagged
 6757                  * commands.  We also temporarily stop tags if there is
 6758                  * a change in transfer negotiation settings to allow
 6759                  * "tag-less" negotiation.
 6760                  */
 6761                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6762                  || (device->inq_flags & SID_CmdQue) != 0)
 6763                         device_tagenb = TRUE;
 6764                 else
 6765                         device_tagenb = FALSE;
 6766 
 6767                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
 6768                   && device_tagenb == FALSE)
 6769                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
 6770                   && device_tagenb == TRUE)) {
 6771 
 6772                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
 6773                                 /*
 6774                                  * Delay change to use tags until after a
 6775                                  * few commands have gone to this device so
 6776                                  * the controller has time to perform transfer
 6777                                  * negotiations without tagged messages getting
 6778                                  * in the way.
 6779                                  */
 6780                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
 6781                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
 6782                         } else {
 6783                                 struct ccb_relsim crs;
 6784 
 6785                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
 6786                                 device->inq_flags &= ~SID_CmdQue;
 6787                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
 6788                                                     sim->max_dev_openings);
 6789                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6790                                 device->tag_delay_count = 0;
 6791 
 6792                                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
 6793                                               /*priority*/1);
 6794                                 crs.ccb_h.func_code = XPT_REL_SIMQ;
 6795                                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6796                                 crs.openings
 6797                                     = crs.release_timeout
 6798                                     = crs.qfrozen_cnt
 6799                                     = 0;
 6800                                 xpt_action((union ccb *)&crs);
 6801                         }
 6802                 }
 6803         }
 6804         if (async_update == FALSE)
 6805                 (*(sim->sim_action))(sim, (union ccb *)cts);
 6806 }
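/*
 * A hedged sketch of a typical caller of the code above: a consumer that
 * wants to disable tagged queueing builds a minimal XPT_SET_TRAN_SETTINGS
 * CCB and leaves every field it does not care about UNSPECIFIED so that
 * xpt_set_transfer_settings() fills it in from the device (hypothetical
 * call site, modeled on xpt_toggle_tags() below):
 *
 *	struct ccb_trans_settings cts;
 *
 *	xpt_setup_ccb(&cts.ccb_h, path, 1);
 *	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 *	cts.type = CTS_TYPE_CURRENT_SETTINGS;
 *	cts.protocol = PROTO_UNSPECIFIED;           // filled in from device
 *	cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
 *	cts.transport = XPORT_UNSPECIFIED;
 *	cts.transport_version = XPORT_VERSION_UNSPECIFIED;
 *	cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
 *	cts.proto_specific.scsi.flags = 0;          // TAG_ENB left clear
 *	cts.xport_specific.valid = 0;
 *	xpt_action((union ccb *)&cts);
 */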
 6807 
 6808 
 6809 static void
 6810 xpt_toggle_tags(struct cam_path *path)
 6811 {
 6812         struct cam_ed *dev;
 6813 
 6814         /*
 6815          * Give controllers a chance to renegotiate
 6816          * before starting tag operations.  We
 6817          * "toggle" tagged queuing off then on
 6818          * which causes the tag enable command delay
 6819          * counter to come into effect.
 6820          */
 6821         dev = path->device;
 6822         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6823          || ((dev->inq_flags & SID_CmdQue) != 0
 6824           && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
 6825                 struct ccb_trans_settings cts;
 6826 
 6827                 xpt_setup_ccb(&cts.ccb_h, path, 1);
 6828                 cts.protocol = PROTO_SCSI;
 6829                 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
 6830                 cts.transport = XPORT_UNSPECIFIED;
 6831                 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
 6832                 cts.proto_specific.scsi.flags = 0;
 6833                 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
 6834                 xpt_set_transfer_settings(&cts, path->device,
 6835                                           /*async_update*/TRUE);
 6836                 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
 6837                 xpt_set_transfer_settings(&cts, path->device,
 6838                                           /*async_update*/TRUE);
 6839         }
 6840 }
 6841 
 6842 static void
 6843 xpt_start_tags(struct cam_path *path)
 6844 {
 6845         struct ccb_relsim crs;
 6846         struct cam_ed *device;
 6847         struct cam_sim *sim;
 6848         int    newopenings;
 6849 
 6850         device = path->device;
 6851         sim = path->bus->sim;
 6852         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6853         xpt_freeze_devq(path, /*count*/1);
 6854         device->inq_flags |= SID_CmdQue;
 6855         if (device->tag_saved_openings != 0)
 6856                 newopenings = device->tag_saved_openings;
 6857         else
 6858                 newopenings = min(device->quirk->maxtags,
 6859                                   sim->max_tagged_dev_openings);
 6860         xpt_dev_ccbq_resize(path, newopenings);
 6861         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
 6862         crs.ccb_h.func_code = XPT_REL_SIMQ;
 6863         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6864         crs.openings
 6865             = crs.release_timeout
 6866             = crs.qfrozen_cnt
 6867             = 0;
 6868         xpt_action((union ccb *)&crs);
 6869 }
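/*
 * The opening count chosen in xpt_start_tags() is simply the smaller of
 * what the device's quirk entry permits and what the SIM can queue per
 * device.  With hypothetical numbers, a quirk maxtags of 64 on a
 * controller whose max_tagged_dev_openings is 32 yields:
 *
 *	newopenings = min(64, 32);                  // 32 tagged openings
 *
 * The XPT_REL_SIMQ request with RELSIM_RELEASE_AFTER_QEMPTY then keeps
 * the device queue frozen until outstanding untagged commands drain, so
 * tagged and untagged I/O are never in flight together.
 */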
 6870 
 6871 static int busses_to_config;
 6872 static int busses_to_reset;
 6873 
 6874 static int
 6875 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
 6876 {
 6877 
 6878         mtx_assert(bus->sim->mtx, MA_OWNED);
 6879 
 6880         if (bus->path_id != CAM_XPT_PATH_ID) {
 6881                 struct cam_path path;