FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c


    1 /*-
    2  * Implementation of the Common Access Method Transport (XPT) layer.
    3  *
    4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
    5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: src/sys/cam/cam_xpt.c,v 1.155.2.2 2006/02/26 22:38:39 iedowse Exp $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/systm.h>
   36 #include <sys/types.h>
   37 #include <sys/malloc.h>
   38 #include <sys/kernel.h>
   39 #include <sys/time.h>
   40 #include <sys/conf.h>
   41 #include <sys/fcntl.h>
   42 #include <sys/md5.h>
   43 #include <sys/interrupt.h>
   44 #include <sys/sbuf.h>
   45 
   46 #include <sys/lock.h>
   47 #include <sys/mutex.h>
   48 #include <sys/sysctl.h>
   49 
   50 #ifdef PC98
   51 #include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
   52 #endif
   53 
   54 #include <cam/cam.h>
   55 #include <cam/cam_ccb.h>
   56 #include <cam/cam_periph.h>
   57 #include <cam/cam_sim.h>
   58 #include <cam/cam_xpt.h>
   59 #include <cam/cam_xpt_sim.h>
   60 #include <cam/cam_xpt_periph.h>
   61 #include <cam/cam_debug.h>
   62 
   63 #include <cam/scsi/scsi_all.h>
   64 #include <cam/scsi/scsi_message.h>
   65 #include <cam/scsi/scsi_pass.h>
   66 #include "opt_cam.h"
   67 
   68 /* Datastructures internal to the xpt layer */
   69 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
   70 
   71 /*
   72  * Definition of an async handler callback block.  These are used to add
   73  * SIMs and peripherals to the async callback lists.
   74  */
   75 struct async_node {
   76         SLIST_ENTRY(async_node) links;
   77         u_int32_t       event_enable;   /* Async Event enables */
   78         void            (*callback)(void *arg, u_int32_t code,
   79                                     struct cam_path *path, void *args);
   80         void            *callback_arg;
   81 };
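      /*
       * Editor's note: a minimal, hedged sketch of how a client registers
       * one of these callbacks.  Registration goes through xpt_action()
       * with an XPT_SASYNC_CB CCB; the priority, event mask, and the names
       * my_async_cb/my_softc below are illustrative only.
       *
       *      struct ccb_setasync csa;
       *
       *      xpt_setup_ccb(&csa.ccb_h, path, 5);
       *      csa.ccb_h.func_code = XPT_SASYNC_CB;
       *      csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
       *      csa.callback = my_async_cb;
       *      csa.callback_arg = my_softc;
       *      xpt_action((union ccb *)&csa);
       */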
   82 
   83 SLIST_HEAD(async_list, async_node);
   84 SLIST_HEAD(periph_list, cam_periph);
   85 static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
   86 
   87 /*
   88  * This is the maximum number of high powered commands (e.g. start unit)
   89  * that can be outstanding at a particular time.
   90  */
   91 #ifndef CAM_MAX_HIGHPOWER
   92 #define CAM_MAX_HIGHPOWER  4
   93 #endif
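      /*
       * Editor's note: since opt_cam.h is included above, this default can
       * presumably be overridden at build time with a kernel config line
       * such as "options CAM_MAX_HIGHPOWER=8" (value illustrative).
       */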
   94 
   95 /* number of high powered commands that can go through right now */
   96 static int num_highpower = CAM_MAX_HIGHPOWER;
   97 
   98 /*
   99  * Structure for queueing a device in a run queue.
  100  * There is one run queue for allocating new ccbs,
  101  * and another for sending ccbs to the controller.
  102  */
  103 struct cam_ed_qinfo {
  104         cam_pinfo pinfo;
  105         struct    cam_ed *device;
  106 };
  107 
  108 /*
  109  * The CAM EDT (Existing Device Table) contains the device information for
  110  * all devices for all busses in the system.  The table contains a
  111  * cam_ed structure for each device on the bus.
  112  */
  113 struct cam_ed {
  114         TAILQ_ENTRY(cam_ed) links;
  115         struct  cam_ed_qinfo alloc_ccb_entry;
  116         struct  cam_ed_qinfo send_ccb_entry;
  117         struct  cam_et   *target;
  118         lun_id_t         lun_id;
  119         struct  camq drvq;              /*
  120                                          * Queue of type drivers wanting to do
  121                                          * work on this device.
  122                                          */
  123         struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
  124         struct  async_list asyncs;      /* Async callback info for this B/T/L */
  125         struct  periph_list periphs;    /* All attached devices */
  126         u_int   generation;             /* Generation number */
  127         struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
  128         struct  xpt_quirk_entry *quirk; /* Oddities about this device */
  129                                         /* Storage for the inquiry data */
  130 #ifdef CAM_NEW_TRAN_CODE
  131         cam_proto        protocol;
  132         u_int            protocol_version;
  133         cam_xport        transport;
  134         u_int            transport_version;
  135 #endif /* CAM_NEW_TRAN_CODE */
  136         struct           scsi_inquiry_data inq_data;
  137         u_int8_t         inq_flags;     /*
  138                                          * Current settings for inquiry flags.
  139                                          * This allows us to override settings
  140                                          * like disconnection and tagged
  141                                          * queuing for a device.
  142                                          */
  143         u_int8_t         queue_flags;   /* Queue flags from the control page */
  144         u_int8_t         serial_num_len;
  145         u_int8_t        *serial_num;
  146         u_int32_t        qfrozen_cnt;
  147         u_int32_t        flags;
  148 #define CAM_DEV_UNCONFIGURED            0x01
  149 #define CAM_DEV_REL_TIMEOUT_PENDING     0x02
  150 #define CAM_DEV_REL_ON_COMPLETE         0x04
  151 #define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
  152 #define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
  153 #define CAM_DEV_TAG_AFTER_COUNT         0x20
  154 #define CAM_DEV_INQUIRY_DATA_VALID      0x40
  155         u_int32_t        tag_delay_count;
  156 #define CAM_TAG_DELAY_COUNT             5
  157         u_int32_t        tag_saved_openings;
  158         u_int32_t        refcount;
  159         struct           callout_handle c_handle;
  160 };
  161 
  162 /*
  163  * Each target is represented by an ET (Existing Target).  These
  164  * entries are created when a target is successfully probed with an
  165  * identify, and removed when a device fails to respond after a number
  166  * of retries, or a bus rescan finds the device missing.
  167  */
  168 struct cam_et { 
  169         TAILQ_HEAD(, cam_ed) ed_entries;
  170         TAILQ_ENTRY(cam_et) links;
  171         struct  cam_eb  *bus;   
  172         target_id_t     target_id;
  173         u_int32_t       refcount;       
  174         u_int           generation;
  175         struct          timeval last_reset;
  176 };
  177 
  178 /*
  179  * Each bus is represented by an EB (Existing Bus).  These entries
  180  * are created by calls to xpt_bus_register and deleted by calls to
  181  * xpt_bus_deregister.
  182  */
  183 struct cam_eb { 
  184         TAILQ_HEAD(, cam_et) et_entries;
  185         TAILQ_ENTRY(cam_eb)  links;
  186         path_id_t            path_id;
  187         struct cam_sim       *sim;
  188         struct timeval       last_reset;
  189         u_int32_t            flags;
  190 #define CAM_EB_RUNQ_SCHEDULED   0x01
  191         u_int32_t            refcount;
  192         u_int                generation;
  193 };
  194 
  195 struct cam_path {
  196         struct cam_periph *periph;
  197         struct cam_eb     *bus;
  198         struct cam_et     *target;
  199         struct cam_ed     *device;
  200 };
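      /*
       * Editor's note: a cam_path names a single (bus, target, lun) triple
       * by pointing at the EDT entries above.  Clients normally construct
       * one with xpt_create_path(), as xptioctl() does later in this file;
       * a hedged sketch with illustrative IDs:
       *
       *      struct cam_path *path;
       *
       *      if (xpt_create_path(&path, NULL, 0, 1, 0) == CAM_REQ_CMP) {
       *              ... use the path (periph NULL, bus 0, target 1, lun 0)
       *              xpt_free_path(path);
       *      }
       */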
  201 
  202 struct xpt_quirk_entry {
  203         struct scsi_inquiry_pattern inq_pat;
  204         u_int8_t quirks;
  205 #define CAM_QUIRK_NOLUNS        0x01
  206 #define CAM_QUIRK_NOSERIAL      0x02
  207 #define CAM_QUIRK_HILUNS        0x04
  208 #define CAM_QUIRK_NOHILUNS      0x08
  209         u_int mintags;
  210         u_int maxtags;
  211 };
  212 
  213 static int cam_srch_hi = 0;
  214 TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
  215 static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
  216 SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
  217     sysctl_cam_search_luns, "I",
  218     "allow search above LUN 7 for SCSI3 and greater devices");
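      /*
       * Editor's note: example usage of the knob declared above.  At boot:
       *
       *      # /boot/loader.conf
       *      kern.cam.cam_srch_hi=1
       *
       * or at runtime:
       *
       *      sysctl kern.cam.cam_srch_hi=1
       */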
  219 
  220 #define CAM_SCSI2_MAXLUN        8
  221 /*
  222  * If the device is not quirked to restrict searching to the first
  223  * 8 luns (CAM_QUIRK_NOHILUNS), and it is either quirked to allow
  224  * searching above lun 7 (CAM_QUIRK_HILUNS), or it is better than
  225  * SCSI-2 and hilun searching is enabled, or it is better than
  226  * SCSI-2 and the last lun was a success, we can search above lun 7.
  227  */
  228 #define CAN_SRCH_HI_SPARSE(dv)                          \
  229   (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  230   && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  231   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))
  232 
  233 #define CAN_SRCH_HI_DENSE(dv)                           \
  234   (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  235   && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  236   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
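      /*
       * Editor's note: a worked example of the two predicates above.  For a
       * device with neither HILUNS nor NOHILUNS quirks that reports SCSI-3:
       *
       *      CAN_SRCH_HI_DENSE(dv)  -> true (ANSI rev above SCSI_REV_2)
       *      CAN_SRCH_HI_SPARSE(dv) -> true only if kern.cam.cam_srch_hi
       *                                is also nonzero
       *
       * A device quirked CAM_QUIRK_NOHILUNS fails both tests regardless of
       * its reported ANSI revision.
       */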
  237 
  238 typedef enum {
  239         XPT_FLAG_OPEN           = 0x01
  240 } xpt_flags;
  241 
  242 struct xpt_softc {
  243         xpt_flags       flags;
  244         u_int32_t       generation;
  245 };
  246 
  247 static const char quantum[] = "QUANTUM";
  248 static const char sony[] = "SONY";
  249 static const char west_digital[] = "WDIGTL";
  250 static const char samsung[] = "SAMSUNG";
  251 static const char seagate[] = "SEAGATE";
  252 static const char microp[] = "MICROP";
  253 
  254 static struct xpt_quirk_entry xpt_quirk_table[] = 
  255 {
  256         {
  257                 /* Reports QUEUE FULL for temporary resource shortages */
  258                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
  259                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  260         },
  261         {
  262                 /* Reports QUEUE FULL for temporary resource shortages */
  263                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
  264                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  265         },
  266         {
  267                 /* Reports QUEUE FULL for temporary resource shortages */
  268                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
  269                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  270         },
  271         {
  272                 /* Broken tagged queuing drive */
  273                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
  274                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  275         },
  276         {
  277                 /* Broken tagged queuing drive */
  278                 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
  279                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  280         },
  281         {
  282                 /* Broken tagged queuing drive */
  283                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
  284                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  285         },
  286         {
  287                 /*
  288                  * Unfortunately, the Quantum Atlas III has the same
  289                  * problem as the Atlas II drives above.
  290                  * Reported by: "Johan Granlund" <johan@granlund.nu>
  291                  *
  292                  * For future reference, the drive with the problem was:
  293                  * QUANTUM QM39100TD-SW N1B0
  294                  * 
  295                  * It's possible that Quantum will fix the problem in later
  296                  * firmware revisions.  If that happens, the quirk entry
  297                  * will need to be made specific to the firmware revisions
  298                  * with the problem.
  299                  * 
  300                  */
  301                 /* Reports QUEUE FULL for temporary resource shortages */
  302                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
  303                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  304         },
  305         {
  306                 /*
  307                  * 18 Gig Atlas III, same problem as the 9G version.
  308                  * Reported by: Andre Albsmeier
  309                  *              <andre.albsmeier@mchp.siemens.de>
  310                  *
  311                  * For future reference, the drive with the problem was:
  312                  * QUANTUM QM318000TD-S N491
  313                  */
  314                 /* Reports QUEUE FULL for temporary resource shortages */
  315                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
  316                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  317         },
  318         {
  319                 /*
  320                  * Broken tagged queuing drive
  321                  * Reported by: Bret Ford <bford@uop.cs.uop.edu>
  322                  *         and: Martin Renters <martin@tdc.on.ca>
  323                  */
  324                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
  325                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  326         },
  327                 /*
  328                  * The Seagate Medalist Pro drives have very poor write
  329                  * performance with anything more than 2 tags.
  330                  * 
  331                  * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
  332                  * Drive:  <SEAGATE ST36530N 1444>
  333                  *
  334                  * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
  335                  * Drive:  <SEAGATE ST34520W 1281>
  336                  *
  337                  * No one has actually reported that the 9G version
  338                  * (ST39140*) of the Medalist Pro has the same problem, but
  339                  * we're assuming that it does because the 4G and 6.5G
  340                  * versions of the drive are broken.
  341                  */
  342         {
  343                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
  344                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  345         },
  346         {
  347                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
  348                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  349         },
  350         {
  351                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
  352                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  353         },
  354         {
  355                 /*
  356                  * Slow when tagged queueing is enabled.  Write performance
  357                  * steadily drops off with more and more concurrent
  358                  * transactions.  Best sequential write performance with
  359                  * tagged queueing turned off and write caching turned on.
  360                  *
  361                  * PR:  kern/10398
  362                  * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
  363                  * Drive:  DCAS-34330 w/ "S65A" firmware.
  364                  *
  365                  * The drive with the problem had the "S65A" firmware
  366                  * revision, and has also been reported (by Stephen J.
  367                  * Roznowski <sjr@home.net>) for a drive with the "S61A"
  368                  * firmware revision.
  369                  *
  370                  * Although no one has reported problems with the 2 gig
  371                  * version of the DCAS drive, the assumption is that it
  372                  * has the same problems as the 4 gig version.  Therefore
  373                  * this quirk entry disables tagged queueing for all
  374                  * DCAS drives.
  375                  */
  376                 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
  377                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  378         },
  379         {
  380                 /* Broken tagged queuing drive */
  381                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
  382                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  383         },
  384         {
  385                 /* Broken tagged queuing drive */ 
  386                 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
  387                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  388         },
  389         {
  390                 /*
  391                  * Broken tagged queuing drive.
  392                  * Submitted by:
  393                  * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
  394                  * in PR kern/9535
  395                  */
  396                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
  397                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  398         },
  399         {
  400                 /*
  401                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
  402                  * 8MB/sec.)
  403                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
  404                  * Best performance with these drives is achieved with
  405                  * tagged queueing turned off, and write caching turned on.
  406                  */
  407                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
  408                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  409         },
  410         {
  411                 /*
  412                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
  413                  * 8MB/sec.)
  414                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
  415                  * Best performance with these drives is achieved with
  416                  * tagged queueing turned off, and write caching turned on.
  417                  */
  418                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
  419                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  420         },
  421         {
  422                 /*
  423                  * Doesn't handle queue full condition correctly,
  424                  * so we need to limit maxtags to what the device
  425                  * can handle instead of determining this automatically.
  426                  */
  427                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
  428                 /*quirks*/0, /*mintags*/2, /*maxtags*/32
  429         },
  430         {
  431                 /* Really only one LUN */
  432                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
  433                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  434         },
  435         {
  436                 /* I can't believe we need a quirk for DPT volumes. */
  437                 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
  438                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
  439                 /*mintags*/0, /*maxtags*/255
  440         },
  441         {
  442                 /*
  443                  * Many Sony CDROM drives don't like multi-LUN probing.
  444                  */
  445                 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
  446                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  447         },
  448         {
  449                 /*
  450                  * This drive doesn't like multiple LUN probing.
  451                  * Submitted by:  Parag Patel <parag@cgt.com>
  452                  */
  453                 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
  454                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  455         },
  456         {
  457                 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
  458                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  459         },
  460         {
  461                 /*
  462                  * The 8200 doesn't like multi-lun probing, and probably
  463                  * doesn't like serial number requests either.
  464                  */
  465                 {
  466                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
  467                         "EXB-8200*", "*"
  468                 },
  469                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  470         },
  471         {
  472                 /*
  473                  * Let's try the same as above, but for a drive that says
  474                  * it's an IPL-6860 but is actually an EXB 8200.
  475                  */
  476                 {
  477                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
  478                         "IPL-6860*", "*"
  479                 },
  480                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  481         },
  482         {
  483                 /*
  484                  * These Hitachi drives don't like multi-lun probing.
  485                  * The PR submitter has a DK319H, but says that the Linux
  486                  * kernel has a similar work-around for the DK312 and DK314,
  487                  * so all DK31* drives are quirked here.
  488                  * PR:            misc/18793
  489                  * Submitted by:  Paul Haddad <paul@pth.com>
  490                  */
  491                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
  492                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
  493         },
  494         {
  495                 /*
  496                  * The Hitachi CJ series with J8A8 firmware apparently has
  497                  * problems with tagged commands.
  498                  * PR: 23536
  499                  * Reported by: amagai@nue.org
  500                  */
  501                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
  502                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  503         },
  504         {
  505                 /*
  506                  * These are the large storage arrays.
  507                  * Submitted by:  William Carrel <william.carrel@infospace.com>
  508                  */
  509                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
  510                 CAM_QUIRK_HILUNS, 2, 1024
  511         },
  512         {
  513                 /*
  514                  * This old revision of the TDC3600 is also SCSI-1, and
  515                  * hangs upon serial number probing.
  516                  */
  517                 {
  518                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
  519                         " TDC 3600", "U07:"
  520                 },
  521                 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
  522         },
  523         {
  524                 /*
  525                  * Maxtor Personal Storage 3000XT (Firewire)
  526                  * hangs upon serial number probing.
  527                  */
  528                 {
  529                         T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
  530                         "1394 storage", "*"
  531                 },
  532                 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
  533         },
  534         {
  535                 /*
  536                  * Would respond to all LUNs if asked for.
  537                  */
  538                 {
  539                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
  540                         "CP150", "*"
  541                 },
  542                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  543         },
  544         {
  545                 /*
  546                  * Would respond to all LUNs if asked for.
  547                  */
  548                 {
  549                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
  550                         "96X2*", "*"
  551                 },
  552                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  553         },
  554         {
  555                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
  556                 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
  557                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  558         },
  559         {
  560                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
  561                 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
  562                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  563         },
  564         {
  565                 /* TeraSolutions special settings for TRC-22 RAID */
  566                 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
  567                   /*quirks*/0, /*mintags*/55, /*maxtags*/255
  568         },
  569         {
  570                 /* Veritas Storage Appliance */
  571                 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
  572                   CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
  573         },
  574         {
  575                 /*
  576                  * Would respond to all LUNs.  Device type and removable
  577                  * flag are jumper-selectable.
  578                  */
  579                 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
  580                   "Tahiti 1", "*"
  581                 },
  582                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  583         },
  584         {
  585                 /* EasyRAID E5A aka. areca ARC-6010 */
  586                 { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
  587                   CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
  588         },
  589         {
  590                 /* Default tagged queuing parameters for all devices */
  591                 {
  592                   T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
  593                   /*vendor*/"*", /*product*/"*", /*revision*/"*"
  594                 },
  595                 /*quirks*/0, /*mintags*/2, /*maxtags*/255
  596         },
  597 };
  598 
  599 static const int xpt_quirk_table_size =
  600         sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
  601 
  602 typedef enum {
  603         DM_RET_COPY             = 0x01,
  604         DM_RET_FLAG_MASK        = 0x0f,
  605         DM_RET_NONE             = 0x00,
  606         DM_RET_STOP             = 0x10,
  607         DM_RET_DESCEND          = 0x20,
  608         DM_RET_ERROR            = 0x30,
  609         DM_RET_ACTION_MASK      = 0xf0
  610 } dev_match_ret;
  611 
  612 typedef enum {
  613         XPT_DEPTH_BUS,
  614         XPT_DEPTH_TARGET,
  615         XPT_DEPTH_DEVICE,
  616         XPT_DEPTH_PERIPH
  617 } xpt_traverse_depth;
  618 
  619 struct xpt_traverse_config {
  620         xpt_traverse_depth      depth;
  621         void                    *tr_func;
  622         void                    *tr_arg;
  623 };
  624 
  625 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
  626 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
  627 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
  628 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
  629 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
  630 
  631 /* Transport layer configuration information */
  632 static struct xpt_softc xsoftc;
  633 
  634 /* Queues for our software interrupt handler */
  635 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
  636 static cam_isrq_t cam_bioq;
  637 static struct mtx cam_bioq_lock;
  638 
  639 /* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
  640 static SLIST_HEAD(,ccb_hdr) ccb_freeq;
  641 static u_int xpt_max_ccbs;      /*
  642                                  * Maximum size of ccb pool.  Modified as
  643                                  * devices are added/removed or have their
  644                                  * opening counts changed.
  645                                  */
  646 static u_int xpt_ccb_count;     /* Current count of allocated ccbs */
  647 
  648 struct cam_periph *xpt_periph;
  649 
  650 static periph_init_t xpt_periph_init;
  651 
  652 static periph_init_t probe_periph_init;
  653 
  654 static struct periph_driver xpt_driver =
  655 {
  656         xpt_periph_init, "xpt",
  657         TAILQ_HEAD_INITIALIZER(xpt_driver.units)
  658 };
  659 
  660 static struct periph_driver probe_driver =
  661 {
  662         probe_periph_init, "probe",
  663         TAILQ_HEAD_INITIALIZER(probe_driver.units)
  664 };
  665 
  666 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
  667 PERIPHDRIVER_DECLARE(probe, probe_driver);
  668 
  669 
  670 static d_open_t xptopen;
  671 static d_close_t xptclose;
  672 static d_ioctl_t xptioctl;
  673 
  674 static struct cdevsw xpt_cdevsw = {
  675         .d_version =    D_VERSION,
  676         .d_flags =      D_NEEDGIANT,
  677         .d_open =       xptopen,
  678         .d_close =      xptclose,
  679         .d_ioctl =      xptioctl,
  680         .d_name =       "xpt",
  681 };
  682 
  683 static struct intr_config_hook *xpt_config_hook;
  684 
  685 static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
  686 static void dead_sim_poll(struct cam_sim *sim);
  687 
  688 /* Dummy SIM that is used when the real one has gone. */
  689 static struct cam_sim cam_dead_sim = {
  690         .sim_action =   dead_sim_action,
  691         .sim_poll =     dead_sim_poll,
  692         .sim_name =     "dead_sim",
  693 };
  694 
  695 #define SIM_DEAD(sim)   ((sim) == &cam_dead_sim)
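      /*
       * Editor's note: when a bus loses its real SIM, bus->sim is pointed
       * at cam_dead_sim, and code paths that would touch SIM resources are
       * expected to guard themselves first, e.g. (illustrative):
       *
       *      if (SIM_DEAD(bus->sim))
       *              return;
       */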
  696 
  697 /* Registered busses */
  698 static TAILQ_HEAD(,cam_eb) xpt_busses;
  699 static u_int bus_generation;
  700 
  701 /* Storage for debugging datastructures */
  702 #ifdef  CAMDEBUG
  703 struct cam_path *cam_dpath;
  704 u_int32_t cam_dflags;
  705 u_int32_t cam_debug_delay;
  706 #endif
  707 
  708 /* Pointers to software interrupt handlers */
  709 static void *cambio_ih;
  710 
  711 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
  712 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
  713 #endif
  714 
  715 /*
  716  * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
  717  * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
  718  * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
  719  */
  720 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
  721     || defined(CAM_DEBUG_LUN)
  722 #ifdef CAMDEBUG
  723 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
  724     || !defined(CAM_DEBUG_LUN)
  725 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
  726         and CAM_DEBUG_LUN"
  727 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
  728 #else /* !CAMDEBUG */
  729 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
  730 #endif /* CAMDEBUG */
  731 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
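      /*
       * Editor's note: the matching kernel configuration, as a hedged
       * example (IDs and flag choice illustrative):
       *
       *      options CAMDEBUG
       *      options CAM_DEBUG_BUS=0
       *      options CAM_DEBUG_TARGET=1
       *      options CAM_DEBUG_LUN=0
       *      options CAM_DEBUG_FLAGS=CAM_DEBUG_INFO
       */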
  732 
  733 /* Our boot-time initialization hook */
  734 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
  735 
  736 static moduledata_t cam_moduledata = {
  737         "cam",
  738         cam_module_event_handler,
  739         NULL
  740 };
  741 
  742 static void     xpt_init(void *);
  743 
  744 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
  745 MODULE_VERSION(cam, 1);
  746 
  747 
  748 static cam_status       xpt_compile_path(struct cam_path *new_path,
  749                                          struct cam_periph *perph,
  750                                          path_id_t path_id,
  751                                          target_id_t target_id,
  752                                          lun_id_t lun_id);
  753 
  754 static void             xpt_release_path(struct cam_path *path);
  755 
  756 static void             xpt_async_bcast(struct async_list *async_head,
  757                                         u_int32_t async_code,
  758                                         struct cam_path *path,
  759                                         void *async_arg);
  760 static void             xpt_dev_async(u_int32_t async_code,
  761                                       struct cam_eb *bus,
  762                                       struct cam_et *target,
  763                                       struct cam_ed *device,
  764                                       void *async_arg);
  765 static path_id_t xptnextfreepathid(void);
  766 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
  767 static union ccb *xpt_get_ccb(struct cam_ed *device);
  768 static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
  769                                   u_int32_t new_priority);
  770 static void      xpt_run_dev_allocq(struct cam_eb *bus);
  771 static void      xpt_run_dev_sendq(struct cam_eb *bus);
  772 static timeout_t xpt_release_devq_timeout;
  773 static timeout_t xpt_release_simq_timeout;
  774 static void      xpt_release_bus(struct cam_eb *bus);
  775 static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
  776                                          int run_queue);
  777 static struct cam_et*
  778                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
  779 static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
  780 static struct cam_ed*
  781                  xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
  782                                   lun_id_t lun_id);
  783 static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
  784                                     struct cam_ed *device);
  785 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
  786 static struct cam_eb*
  787                  xpt_find_bus(path_id_t path_id);
  788 static struct cam_et*
  789                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
  790 static struct cam_ed*
  791                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
  792 static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
  793 static void      xpt_scan_lun(struct cam_periph *periph,
  794                               struct cam_path *path, cam_flags flags,
  795                               union ccb *ccb);
  796 static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
  797 static xpt_busfunc_t    xptconfigbuscountfunc;
  798 static xpt_busfunc_t    xptconfigfunc;
  799 static void      xpt_config(void *arg);
  800 static xpt_devicefunc_t xptpassannouncefunc;
  801 static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
  802 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
  803 static void      xptpoll(struct cam_sim *sim);
  804 static void      camisr(void *);
  805 #if 0
  806 static void      xptstart(struct cam_periph *periph, union ccb *work_ccb);
  807 static void      xptasync(struct cam_periph *periph,
  808                           u_int32_t code, struct cam_path *path);
  809 #endif
  810 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
  811                                     u_int num_patterns, struct cam_eb *bus);
  812 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
  813                                        u_int num_patterns,
  814                                        struct cam_ed *device);
  815 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
  816                                        u_int num_patterns,
  817                                        struct cam_periph *periph);
  818 static xpt_busfunc_t    xptedtbusfunc;
  819 static xpt_targetfunc_t xptedttargetfunc;
  820 static xpt_devicefunc_t xptedtdevicefunc;
  821 static xpt_periphfunc_t xptedtperiphfunc;
  822 static xpt_pdrvfunc_t   xptplistpdrvfunc;
  823 static xpt_periphfunc_t xptplistperiphfunc;
  824 static int              xptedtmatch(struct ccb_dev_match *cdm);
  825 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
  826 static int              xptbustraverse(struct cam_eb *start_bus,
  827                                        xpt_busfunc_t *tr_func, void *arg);
  828 static int              xpttargettraverse(struct cam_eb *bus,
  829                                           struct cam_et *start_target,
  830                                           xpt_targetfunc_t *tr_func, void *arg);
  831 static int              xptdevicetraverse(struct cam_et *target,
  832                                           struct cam_ed *start_device,
  833                                           xpt_devicefunc_t *tr_func, void *arg);
  834 static int              xptperiphtraverse(struct cam_ed *device,
  835                                           struct cam_periph *start_periph,
  836                                           xpt_periphfunc_t *tr_func, void *arg);
  837 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
  838                                         xpt_pdrvfunc_t *tr_func, void *arg);
  839 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
  840                                             struct cam_periph *start_periph,
  841                                             xpt_periphfunc_t *tr_func,
  842                                             void *arg);
  843 static xpt_busfunc_t    xptdefbusfunc;
  844 static xpt_targetfunc_t xptdeftargetfunc;
  845 static xpt_devicefunc_t xptdefdevicefunc;
  846 static xpt_periphfunc_t xptdefperiphfunc;
  847 static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
  848 #ifdef notusedyet
  849 static int              xpt_for_all_targets(xpt_targetfunc_t *tr_func,
  850                                             void *arg);
  851 #endif
  852 static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
  853                                             void *arg);
  854 #ifdef notusedyet
  855 static int              xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
  856                                             void *arg);
  857 #endif
  858 static xpt_devicefunc_t xptsetasyncfunc;
  859 static xpt_busfunc_t    xptsetasyncbusfunc;
  860 static cam_status       xptregister(struct cam_periph *periph,
  861                                     void *arg);
  862 static cam_status       proberegister(struct cam_periph *periph,
  863                                       void *arg);
  864 static void      probeschedule(struct cam_periph *probe_periph);
  865 static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
  866 static void      proberequestdefaultnegotiation(struct cam_periph *periph);
  867 static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
  868 static void      probecleanup(struct cam_periph *periph);
  869 static void      xpt_find_quirk(struct cam_ed *device);
  870 #ifdef CAM_NEW_TRAN_CODE
  871 static void      xpt_devise_transport(struct cam_path *path);
  872 #endif /* CAM_NEW_TRAN_CODE */
  873 static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
  874                                            struct cam_ed *device,
  875                                            int async_update);
  876 static void      xpt_toggle_tags(struct cam_path *path);
  877 static void      xpt_start_tags(struct cam_path *path);
  878 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
  879                                             struct cam_ed *dev);
  880 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
  881                                            struct cam_ed *dev);
  882 static __inline int periph_is_queued(struct cam_periph *periph);
  883 static __inline int device_is_alloc_queued(struct cam_ed *device);
  884 static __inline int device_is_send_queued(struct cam_ed *device);
  885 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
  886 
  887 static __inline int
  888 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
  889 {
  890         int retval;
  891 
  892         if (dev->ccbq.devq_openings > 0) {
  893                 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
  894                         cam_ccbq_resize(&dev->ccbq,
  895                                         dev->ccbq.dev_openings
  896                                         + dev->ccbq.dev_active);
  897                         dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
  898                 }
  899                 /*
  900                  * The priority of a device waiting for CCB resources
  901                  * is that of the highest priority peripheral driver
  902                  * enqueued.
  903                  */
  904                 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
  905                                           &dev->alloc_ccb_entry.pinfo,
  906                                           CAMQ_GET_HEAD(&dev->drvq)->priority); 
  907         } else {
  908                 retval = 0;
  909         }
  910 
  911         return (retval);
  912 }
  913 
  914 static __inline int
  915 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
  916 {
  917         int     retval;
  918 
  919         if (dev->ccbq.dev_openings > 0) {
  920                 /*
  921                  * The priority of a device waiting for controller
  922                  * resources is that of the highest priority CCB
  923                  * enqueued.
  924                  */
  925                 retval =
  926                     xpt_schedule_dev(&bus->sim->devq->send_queue,
  927                                      &dev->send_ccb_entry.pinfo,
  928                                      CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
  929         } else {
  930                 retval = 0;
  931         }
  932         return (retval);
  933 }
  934 
  935 static __inline int
  936 periph_is_queued(struct cam_periph *periph)
  937 {
  938         return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
  939 }
  940 
  941 static __inline int
  942 device_is_alloc_queued(struct cam_ed *device)
  943 {
  944         return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  945 }
  946 
  947 static __inline int
  948 device_is_send_queued(struct cam_ed *device)
  949 {
  950         return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  951 }
  952 
  953 static __inline int
  954 dev_allocq_is_runnable(struct cam_devq *devq)
  955 {
  956         /*
  957          * Have work to do.
  958          * Have space to do more work.
  959          * Allowed to do work.
  960          */
  961         return ((devq->alloc_queue.qfrozen_cnt == 0)
  962              && (devq->alloc_queue.entries > 0)
  963              && (devq->alloc_openings > 0));
  964 }
  965 
  966 static void
  967 xpt_periph_init(void)
  968 {
  969         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
  970 }
  971 
  972 static void
  973 probe_periph_init(void)
  974 {
  975 }
  976 
  977 
  978 static void
  979 xptdone(struct cam_periph *periph, union ccb *done_ccb)
  980 {
  981         /* Caller will release the CCB */
  982         wakeup(&done_ccb->ccb_h.cbfcnp);
  983 }
  984 
  985 static int
  986 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
  987 {
  988         int unit;
  989 
  990         unit = minor(dev) & 0xff;
  991 
  992         /*
  993          * Only allow read-write access.
  994          */
  995         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
  996                 return(EPERM);
  997 
  998         /*
  999          * We don't allow nonblocking access.
 1000          */
 1001         if ((flags & O_NONBLOCK) != 0) {
 1002                 printf("xpt%d: can't do nonblocking access\n", unit);
 1003                 return(ENODEV);
 1004         }
 1005 
 1006         /*
 1007          * We only have one transport layer right now.  If someone accesses
 1008          * us via something other than minor number 0, point out their
 1009          * mistake.
 1010          */
 1011         if (unit != 0) {
 1012                 printf("xptopen: got invalid xpt unit %d\n", unit);
 1013                 return(ENXIO);
 1014         }
 1015 
 1016         /* Mark ourselves open */
 1017         xsoftc.flags |= XPT_FLAG_OPEN;
 1018         
 1019         return(0);
 1020 }
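      /*
       * Editor's note: the checks above mean userland must open the control
       * device read-write and blocking; a minimal sketch:
       *
       *      int fd = open("/dev/xpt0", O_RDWR);
       */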
 1021 
 1022 static int
 1023 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
 1024 {
 1025         int unit;
 1026 
 1027         unit = minor(dev) & 0xff;
 1028 
 1029         /*
 1030          * We only have one transport layer right now.  If someone accesses
 1031          * us via something other than minor number 0, point out their
 1032          * mistake.
 1033          */
 1034         if (unit != 0) {
 1035                 printf("xptclose: got invalid xpt unit %d\n", unit);
 1036                 return(ENXIO);
 1037         }
 1038 
 1039         /* Mark ourselves closed */
 1040         xsoftc.flags &= ~XPT_FLAG_OPEN;
 1041 
 1042         return(0);
 1043 }
 1044 
 1045 static int
 1046 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
 1047 {
 1048         int unit, error;
 1049 
 1050         error = 0;
 1051         unit = minor(dev) & 0xff;
 1052 
 1053         /*
 1054          * We only have one transport layer right now.  If someone accesses
 1055          * us via something other than minor number 0, point out their
 1056          * mistake.
 1057          */
 1058         if (unit != 0) {
 1059                 printf("xptioctl: got invalid xpt unit %d\n", unit);
 1060                 return(ENXIO);
 1061         }
 1062 
 1063         switch(cmd) {
 1064         /*
 1065          * For the transport layer CAMIOCOMMAND ioctl, we really only want
 1066          * to accept CCB types that don't quite make sense to send through a
 1067          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
 1068          * in the CAM spec.
 1069          */
 1070         case CAMIOCOMMAND: {
 1071                 union ccb *ccb;
 1072                 union ccb *inccb;
 1073 
 1074                 inccb = (union ccb *)addr;
 1075 
 1076                 switch(inccb->ccb_h.func_code) {
 1077                 case XPT_SCAN_BUS:
 1078                 case XPT_RESET_BUS:
 1079                         if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
 1080                          || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
 1081                                 error = EINVAL;
 1082                                 break;
 1083                         }
 1084                         /* FALLTHROUGH */
 1085                 case XPT_PATH_INQ:
 1086                 case XPT_ENG_INQ:
 1087                 case XPT_SCAN_LUN:
 1088 
 1089                         ccb = xpt_alloc_ccb();
 1090 
 1091                         /*
 1092                          * Create a path using the bus, target, and lun the
 1093                          * user passed in.
 1094                          */
 1095                         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
 1096                                             inccb->ccb_h.path_id,
 1097                                             inccb->ccb_h.target_id,
 1098                                             inccb->ccb_h.target_lun) !=
 1099                                             CAM_REQ_CMP){
 1100                                 error = EINVAL;
 1101                                 xpt_free_ccb(ccb);
 1102                                 break;
 1103                         }
 1104                         /* Ensure all of our fields are correct */
 1105                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
 1106                                       inccb->ccb_h.pinfo.priority);
 1107                         xpt_merge_ccb(ccb, inccb);
 1108                         ccb->ccb_h.cbfcnp = xptdone;
 1109                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
 1110                         bcopy(ccb, inccb, sizeof(union ccb));
 1111                         xpt_free_path(ccb->ccb_h.path);
 1112                         xpt_free_ccb(ccb);
 1113                         break;
 1114 
 1115                 case XPT_DEBUG: {
 1116                         union ccb ccb;
 1117 
 1118                         /*
 1119                          * This is an immediate CCB, so it's okay to
 1120                          * allocate it on the stack.
 1121                          */
 1122 
 1123                         /*
 1124                          * Create a path using the bus, target, and lun the
 1125                          * user passed in.
 1126                          */
 1127                         if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
 1128                                             inccb->ccb_h.path_id,
 1129                                             inccb->ccb_h.target_id,
 1130                                             inccb->ccb_h.target_lun) !=
 1131                                             CAM_REQ_CMP){
 1132                                 error = EINVAL;
 1133                                 break;
 1134                         }
 1135                         /* Ensure all of our fields are correct */
 1136                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
 1137                                       inccb->ccb_h.pinfo.priority);
 1138                         xpt_merge_ccb(&ccb, inccb);
 1139                         ccb.ccb_h.cbfcnp = xptdone;
 1140                         xpt_action(&ccb);
 1141                         bcopy(&ccb, inccb, sizeof(union ccb));
 1142                         xpt_free_path(ccb.ccb_h.path);
 1143                         break;
 1144 
 1145                 }
 1146                 case XPT_DEV_MATCH: {
 1147                         struct cam_periph_map_info mapinfo;
 1148                         struct cam_path *old_path;
 1149 
 1150                         /*
 1151                          * We can't deal with physical addresses for this
 1152                          * type of transaction.
 1153                          */
 1154                         if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
 1155                                 error = EINVAL;
 1156                                 break;
 1157                         }
 1158 
 1159                         /*
 1160                          * Save this in case the caller had it set to
 1161                          * something in particular.
 1162                          */
 1163                         old_path = inccb->ccb_h.path;
 1164 
 1165                         /*
 1166                          * We really don't need a path for the matching
 1167                          * code.  The path is needed because of the
 1168                          * debugging statements in xpt_action().  They
 1169                          * assume that the CCB has a valid path.
 1170                          */
 1171                         inccb->ccb_h.path = xpt_periph->path;
 1172 
 1173                         bzero(&mapinfo, sizeof(mapinfo));
 1174 
 1175                         /*
 1176                          * Map the pattern and match buffers into kernel
 1177                          * virtual address space.
 1178                          */
 1179                         error = cam_periph_mapmem(inccb, &mapinfo);
 1180 
 1181                         if (error) {
 1182                                 inccb->ccb_h.path = old_path;
 1183                                 break;
 1184                         }
 1185 
 1186                         /*
 1187                          * This is an immediate CCB, we can send it on directly.
 1188                          */
 1189                         xpt_action(inccb);
 1190 
 1191                         /*
 1192                          * Map the buffers back into user space.
 1193                          */
 1194                         cam_periph_unmapmem(inccb, &mapinfo);
 1195 
 1196                         inccb->ccb_h.path = old_path;
 1197 
 1198                         error = 0;
 1199                         break;
 1200                 }
 1201                 default:
 1202                         error = ENOTSUP;
 1203                         break;
 1204                 }
 1205                 break;
 1206         }
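      /*
       * Editor's note: a hedged userland sketch of driving the
       * CAMIOCOMMAND case above with XPT_PATH_INQ (IDs illustrative):
       *
       *      union ccb ccb;
       *
       *      bzero(&ccb, sizeof(ccb));
       *      ccb.ccb_h.func_code = XPT_PATH_INQ;
       *      ccb.ccb_h.path_id = 0;
       *      ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
       *      ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
       *      if (ioctl(fd, CAMIOCOMMAND, &ccb) == 0 &&
       *          (ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
       *              printf("SIM: %s\n", ccb.cpi.dev_name);
       */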
 1207         /*
 1208          * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
 1209          * with the peripheral driver name and unit number filled in.  The other
 1210          * fields don't really matter as input.  The passthrough driver name
 1211          * ("pass") and unit number are passed back in the ccb.  The current
 1212          * device generation number, the index into the device peripheral
 1213          * driver list, and the status are also passed back.  Note that
 1214          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
 1215          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
 1216          * (or rather should be) impossible for the device peripheral driver
 1217          * list to change since we look at the whole thing in one pass,
 1218          * and we do it with splcam protection.
 1219          *
 1220          */
 1221         case CAMGETPASSTHRU: {
 1222                 union ccb *ccb;
 1223                 struct cam_periph *periph;
 1224                 struct periph_driver **p_drv;
 1225                 char   *name;
 1226                 u_int unit;
 1227                 u_int cur_generation;
 1228                 int base_periph_found;
 1229                 int splbreaknum;
 1230                 int s;
 1231 
 1232                 ccb = (union ccb *)addr;
 1233                 unit = ccb->cgdl.unit_number;
 1234                 name = ccb->cgdl.periph_name;
 1235                 /*
 1236                  * Every 100 devices, we want to drop our spl protection to
 1237                  * give the software interrupt handler a chance to run.
 1238                  * Most systems won't run into this check, but this should
 1239                  * avoid starvation in the software interrupt handler in
 1240                  * large systems.
 1241                  */
 1242                 splbreaknum = 100;
 1243 
 1246                 base_periph_found = 0;
 1247 
 1248                 /*
 1249                  * Sanity check -- make sure we don't get a null peripheral
 1250                  * driver name.
 1251                  */
 1252                 if (*ccb->cgdl.periph_name == '\0') {
 1253                         error = EINVAL;
 1254                         break;
 1255                 }
 1256 
 1257                 /* Keep the list from changing while we traverse it */
 1258                 s = splcam();
 1259 ptstartover:
 1260                 cur_generation = xsoftc.generation;
 1261 
 1262                 /* first find our driver in the list of drivers */
 1263                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
 1264                         if (strcmp((*p_drv)->driver_name, name) == 0)
 1265                                 break;
 1266 
 1267                 if (*p_drv == NULL) {
 1268                         splx(s);
 1269                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1270                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1271                         *ccb->cgdl.periph_name = '\0';
 1272                         ccb->cgdl.unit_number = 0;
 1273                         error = ENOENT;
 1274                         break;
 1275                 }       
 1276 
 1277                 /*
 1278                  * Run through every peripheral instance of this driver
 1279                  * and check to see whether it matches the unit passed
 1280                  * in by the user.  If it does, get out of the loops and
 1281                  * find the passthrough driver associated with that
 1282                  * peripheral driver.
 1283                  */
 1284                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
 1285                      periph = TAILQ_NEXT(periph, unit_links)) {
 1286 
 1287                         if (periph->unit_number == unit) {
 1288                                 break;
 1289                         } else if (--splbreaknum == 0) {
 1290                                 splx(s);
 1291                                 s = splcam();
 1292                                 splbreaknum = 100;
 1293                                 if (cur_generation != xsoftc.generation)
 1294                                        goto ptstartover;
 1295                         }
 1296                 }
 1297                 /*
 1298                  * If we found the peripheral driver that the user passed
 1299                  * in, go through all of the peripheral drivers for that
 1300                  * particular device and look for a passthrough driver.
 1301                  */
 1302                 if (periph != NULL) {
 1303                         struct cam_ed *device;
 1304                         int i;
 1305 
 1306                         base_periph_found = 1;
 1307                         device = periph->path->device;
 1308                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
 1309                              periph != NULL;
 1310                              periph = SLIST_NEXT(periph, periph_links), i++) {
 1311                                 /*
 1312                                  * Check to see whether we have a
 1313                                  * passthrough device or not. 
 1314                                  */
 1315                                 if (strcmp(periph->periph_name, "pass") == 0) {
 1316                                         /*
 1317                                          * Fill in the getdevlist fields.
 1318                                          */
 1319                                         strcpy(ccb->cgdl.periph_name,
 1320                                                periph->periph_name);
 1321                                         ccb->cgdl.unit_number =
 1322                                                 periph->unit_number;
 1323                                         if (SLIST_NEXT(periph, periph_links))
 1324                                                 ccb->cgdl.status =
 1325                                                         CAM_GDEVLIST_MORE_DEVS;
 1326                                         else
 1327                                                 ccb->cgdl.status =
 1328                                                        CAM_GDEVLIST_LAST_DEVICE;
 1329                                         ccb->cgdl.generation =
 1330                                                 device->generation;
 1331                                         ccb->cgdl.index = i;
 1332                                         /*
 1333                                          * Fill in some CCB header fields
 1334                                          * that the user may want.
 1335                                          */
 1336                                         ccb->ccb_h.path_id =
 1337                                                 periph->path->bus->path_id;
 1338                                         ccb->ccb_h.target_id =
 1339                                                 periph->path->target->target_id;
 1340                                         ccb->ccb_h.target_lun =
 1341                                                 periph->path->device->lun_id;
 1342                                         ccb->ccb_h.status = CAM_REQ_CMP;
 1343                                         break;
 1344                                 }
 1345                         }
 1346                 }
 1347 
 1348                 /*
 1349                  * If the periph is null here, one of two things has
 1350                  * happened.  The first possibility is that we couldn't
 1351                  * find the unit number of the particular peripheral driver
 1352                  * that the user is asking about; e.g., the user asks for
 1353                  * the passthrough driver for "da11".  We find the list of
 1354                  * "da" peripherals all right, but there is no unit 11.
 1355                  * The other possibility is that we went through the list
 1356                  * of peripheral drivers attached to the device structure,
 1357                  * but didn't find one with the name "pass".  Either way,
 1358                  * we return ENOENT, since we couldn't find something.
 1359                  */
 1360                 if (periph == NULL) {
 1361                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1362                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1363                         *ccb->cgdl.periph_name = '\0';
 1364                         ccb->cgdl.unit_number = 0;
 1365                         error = ENOENT;
 1366                         /*
 1367                          * It is unfortunate that this is even necessary,
 1368                          * but there are many, many clueless users out there.
 1369                          * If base_periph_found is set, the user is
 1370                          * looking for the passthrough driver, but
 1371                          * doesn't have one in the kernel.
 1372                          */
 1373                         if (base_periph_found == 1) {
 1374                                 printf("xptioctl: pass driver is not in the "
 1375                                        "kernel\n");
 1376                                 printf("xptioctl: put \"device pass0\" in "
 1377                                        "your kernel config file\n");
 1378                         }
 1379                 }
 1380                 splx(s);
 1381                 break;
 1382                 }
 1383         default:
 1384                 error = ENOTTY;
 1385                 break;
 1386         }
 1387 
 1388         return(error);
 1389 }
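
/*
 * Illustrative sketch (not part of this file): a minimal userland
 * consumer of the CAMGETPASSTHRU ioctl handled above.  The /dev/xpt0
 * path and the XPT_GDEVLIST func_code are assumptions modeled on the
 * stock CAM userland interface; error handling is elided.
 *
 *      union ccb ccb;
 *      int fd;
 *
 *      fd = open("/dev/xpt0", O_RDWR);
 *      bzero(&ccb, sizeof(ccb));
 *      ccb.ccb_h.func_code = XPT_GDEVLIST;
 *      strlcpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
 *      ccb.cgdl.unit_number = 0;
 *      if (ioctl(fd, CAMGETPASSTHRU, &ccb) != -1
 *       && ccb.ccb_h.status == CAM_REQ_CMP)
 *              printf("passthru device is %s%d\n", ccb.cgdl.periph_name,
 *                     ccb.cgdl.unit_number);
 */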
 1390 
 1391 static int
 1392 cam_module_event_handler(module_t mod, int what, void *arg)
 1393 {
 1394         if (what == MOD_LOAD) {
 1395                 xpt_init(NULL);
 1396         } else if (what == MOD_UNLOAD) {
 1397                 return EBUSY;
 1398         } else {
 1399                 return EOPNOTSUPP;
 1400         }
 1401 
 1402         return 0;
 1403 }
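
/*
 * Sketch of how a module event handler such as the one above is
 * typically registered.  The SYSINIT subsystem and order values are
 * assumptions, not necessarily what this driver uses.
 *
 *      static moduledata_t cam_moduledata = {
 *              "cam",
 *              cam_module_event_handler,
 *              NULL
 *      };
 *
 *      DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE,
 *          SI_ORDER_SECOND);
 */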
 1404 
 1405 /* Functions accessed by the peripheral drivers */
 1406 static void
 1407 xpt_init(void *dummy)
 1408 {
 1410         struct cam_sim *xpt_sim;
 1411         struct cam_path *path;
 1412         struct cam_devq *devq;
 1413         cam_status status;
 1414 
 1415         TAILQ_INIT(&xpt_busses);
 1416         TAILQ_INIT(&cam_bioq);
 1417         SLIST_INIT(&ccb_freeq);
 1418         STAILQ_INIT(&highpowerq);
 1419 
 1420         mtx_init(&cam_bioq_lock, "CAM BIOQ lock", NULL, MTX_DEF);
 1421 
 1422         /*
 1423          * The xpt layer is, itself, the equivalent of a SIM.
 1424          * Allow 16 ccbs in the ccb pool for it.  This should
 1425          * give decent parallelism when we probe busses and
 1426          * perform other XPT functions.
 1427          */
 1428         devq = cam_simq_alloc(16);
 1429         xpt_sim = cam_sim_alloc(xptaction,
 1430                                 xptpoll,
 1431                                 "xpt",
 1432                                 /*softc*/NULL,
 1433                                 /*unit*/0,
 1434                                 /*max_dev_transactions*/0,
 1435                                 /*max_tagged_dev_transactions*/0,
 1436                                 devq);
 1437         xpt_max_ccbs = 16;
 1438                                 
 1439         xpt_bus_register(xpt_sim, /*bus #*/0);
 1440 
 1441         /*
 1442          * Looking at the XPT from the SIM layer, the XPT is
 1443          * the equivalent of a peripheral driver.  Allocate
 1444          * a peripheral driver entry for us.
 1445          */
 1446         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
 1447                                       CAM_TARGET_WILDCARD,
 1448                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
 1449                 printf("xpt_init: xpt_create_path failed with status %#x,"
 1450                        " failing attach\n", status);
 1451                 return;
 1452         }
 1453 
 1454         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
 1455                          path, NULL, 0, NULL);
 1456         xpt_free_path(path);
 1457 
 1458         xpt_sim->softc = xpt_periph;
 1459 
 1460         /*
 1461          * Register a callback for when interrupts are enabled.
 1462          */
 1463         xpt_config_hook =
 1464             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
 1465                                               M_TEMP, M_NOWAIT | M_ZERO);
 1466         if (xpt_config_hook == NULL) {
 1467                 printf("xpt_init: Cannot malloc config hook "
 1468                        "- failing attach\n");
 1469                 return;
 1470         }
 1471 
 1472         xpt_config_hook->ich_func = xpt_config;
 1473         if (config_intrhook_establish(xpt_config_hook) != 0) {
 1474                 free(xpt_config_hook, M_TEMP);
 1475                 printf("xpt_init: config_intrhook_establish failed "
 1476                        "- failing attach\n");
 1477                 return; /* actually fail the attach, as the message says */
 1478         }
 1479         /* Install our software interrupt handlers */
 1480         swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih);
 1481 }
 1482 
 1483 static cam_status
 1484 xptregister(struct cam_periph *periph, void *arg)
 1485 {
 1486         if (periph == NULL) {
 1487                 printf("xptregister: periph was NULL!!\n");
 1488                 return(CAM_REQ_CMP_ERR);
 1489         }
 1490 
 1491         periph->softc = NULL;
 1492 
 1493         xpt_periph = periph;
 1494 
 1495         return(CAM_REQ_CMP);
 1496 }
 1497 
 1498 int32_t
 1499 xpt_add_periph(struct cam_periph *periph)
 1500 {
 1501         struct cam_ed *device;
 1502         int32_t  status;
 1503         struct periph_list *periph_head;
 1504 
 1505         GIANT_REQUIRED;
 1506 
 1507         device = periph->path->device;
 1508 
 1509         status = CAM_REQ_CMP;
 1510 
 1511         if (device != NULL) {
 1512                 int s;
 1513 
 1514                 /*
 1515                  * Dereference the device only after the NULL check,
 1516                  * then make room for this peripheral so it will fit
 1517                  * in the queue when it's scheduled to run.
 1518                  */
 1519                 periph_head = &device->periphs;
 1520 
 1521                 s = splsoftcam();
 1522                 status = camq_resize(&device->drvq,
 1523                                      device->drvq.array_size + 1);
 1524 
 1525                 device->generation++;
 1526 
 1527                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
 1528 
 1529                 splx(s);
 1530         }
 1531 
 1532         xsoftc.generation++;
 1533 
 1534         return (status);
 1535 }
 1536 
 1537 void
 1538 xpt_remove_periph(struct cam_periph *periph)
 1539 {
 1540         struct cam_ed *device;
 1541 
 1542         GIANT_REQUIRED;
 1543 
 1544         device = periph->path->device;
 1545 
 1546         if (device != NULL) {
 1547                 int s;
 1548                 struct periph_list *periph_head;
 1549 
 1550                 periph_head = &device->periphs;
 1551                 
 1552                 /* Release the slot for this peripheral */
 1553                 s = splsoftcam();
 1554                 camq_resize(&device->drvq, device->drvq.array_size - 1);
 1555 
 1556                 device->generation++;
 1557 
 1558                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
 1559 
 1560                 splx(s);
 1561         }
 1562 
 1563         xsoftc.generation++;
 1564 
 1565 }
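
/*
 * Worked example of the generation protocol (explanatory note): the
 * CAMGETPASSTHRU handler above snapshots xsoftc.generation before it
 * walks the peripheral lists, and jumps back to ptstartover whenever an
 * intervening xpt_add_periph() or xpt_remove_periph() call has bumped
 * the counter, so it never traverses a stale list.
 */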
 1566 
 1567 #ifdef CAM_NEW_TRAN_CODE
 1568 
 1569 void
 1570 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1571 {
 1572         struct  ccb_pathinq cpi;
 1573         struct  ccb_trans_settings cts;
 1574         struct  cam_path *path;
 1575         u_int   speed;
 1576         u_int   freq;
 1577         u_int   mb;
 1578         int     s;
 1579 
 1580         GIANT_REQUIRED;
 1581 
 1582         path = periph->path;
 1583         /*
 1584          * To ensure that this is printed in one piece,
 1585          * mask out CAM interrupts.
 1586          */
 1587         s = splsoftcam();
 1588         printf("%s%d at %s%d bus %d target %d lun %d\n",
 1589                periph->periph_name, periph->unit_number,
 1590                path->bus->sim->sim_name,
 1591                path->bus->sim->unit_number,
 1592                path->bus->sim->bus_id,
 1593                path->target->target_id,
 1594                path->device->lun_id);
 1595         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1596         scsi_print_inquiry(&path->device->inq_data);
 1597         if (bootverbose && path->device->serial_num_len > 0) {
 1598                 /* Don't wrap the screen  - print only the first 60 chars */
 1599                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1600                        periph->unit_number, path->device->serial_num);
 1601         }
 1602         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 1603         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 1604         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 1605         xpt_action((union ccb*)&cts);
 1606 
 1607         /* Ask the SIM for its base transfer speed */
 1608         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 1609         cpi.ccb_h.func_code = XPT_PATH_INQ;
 1610         xpt_action((union ccb *)&cpi);
 1611 
 1612         speed = cpi.base_transfer_speed;
 1613         freq = 0;
 1614         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1615                 struct  ccb_trans_settings_spi *spi;
 1616 
 1617                 spi = &cts.xport_specific.spi;
 1618                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
 1619                   && spi->sync_offset != 0) {
 1620                         freq = scsi_calc_syncsrate(spi->sync_period);
 1621                         speed = freq;
 1622                 }
 1623 
 1624                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
 1625                         speed *= (0x01 << spi->bus_width);
 1626         }
 1627 
 1628         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1629                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
 1630                 if (fc->valid & CTS_FC_VALID_SPEED) {
 1631                         speed = fc->bitrate;
 1632                 }
 1633         }
 1634 
 1635         mb = speed / 1000;
 1636         if (mb > 0)
 1637                 printf("%s%d: %d.%03dMB/s transfers",
 1638                        periph->periph_name, periph->unit_number,
 1639                        mb, speed % 1000);
 1640         else
 1641                 printf("%s%d: %dKB/s transfers", periph->periph_name,
 1642                        periph->unit_number, speed);
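        /*
         * Worked example (illustrative numbers): if the negotiation
         * above yields freq == 80000 (i.e. 80.000MHz) on a wide bus
         * (bus_width == 1), then speed == 80000 * 2 == 160000 KB/s,
         * mb == 160, and the code above prints "160.000MB/s transfers".
         */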
 1643         /* Report additional information about SPI connections */
 1644         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1645                 struct  ccb_trans_settings_spi *spi;
 1646 
 1647                 spi = &cts.xport_specific.spi;
 1648                 if (freq != 0) {
 1649                         printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
 1650                                freq % 1000,
 1651                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
 1652                              ? " DT" : "",
 1653                                spi->sync_offset);
 1654                 }
 1655                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
 1656                  && spi->bus_width > 0) {
 1657                         if (freq != 0) {
 1658                                 printf(", ");
 1659                         } else {
 1660                                 printf(" (");
 1661                         }
 1662                         printf("%dbit)", 8 * (0x01 << spi->bus_width));
 1663                 } else if (freq != 0) {
 1664                         printf(")");
 1665                 }
 1666         }
 1667         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1668                 struct  ccb_trans_settings_fc *fc;
 1669 
 1670                 fc = &cts.xport_specific.fc;
 1671                 if (fc->valid & CTS_FC_VALID_WWNN)
 1672                         printf(" WWNN 0x%llx", (long long) fc->wwnn);
 1673                 if (fc->valid & CTS_FC_VALID_WWPN)
 1674                         printf(" WWPN 0x%llx", (long long) fc->wwpn);
 1675                 if (fc->valid & CTS_FC_VALID_PORT)
 1676                         printf(" PortID 0x%x", fc->port);
 1677         }
 1678 
 1679         if (path->device->inq_flags & SID_CmdQue
 1680          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1681                 printf("\n%s%d: Tagged Queueing Enabled",
 1682                        periph->periph_name, periph->unit_number);
 1683         }
 1684         printf("\n");
 1685 
 1686         /*
 1687          * We only want to print the caller's announce string if they've
 1688          * passed one in.
 1689          */
 1690         if (announce_string != NULL)
 1691                 printf("%s%d: %s\n", periph->periph_name,
 1692                        periph->unit_number, announce_string);
 1693         splx(s);
 1694 }
 1695 #else /* CAM_NEW_TRAN_CODE */
 1696 void
 1697 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1698 {
 1699         int s;
 1700         u_int mb;
 1701         struct cam_path *path;
 1702         struct ccb_trans_settings cts;
 1703 
 1704         GIANT_REQUIRED;
 1705 
 1706         path = periph->path;
 1707         /*
 1708          * To ensure that this is printed in one piece,
 1709          * mask out CAM interrupts.
 1710          */
 1711         s = splsoftcam();
 1712         printf("%s%d at %s%d bus %d target %d lun %d\n",
 1713                periph->periph_name, periph->unit_number,
 1714                path->bus->sim->sim_name,
 1715                path->bus->sim->unit_number,
 1716                path->bus->sim->bus_id,
 1717                path->target->target_id,
 1718                path->device->lun_id);
 1719         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1720         scsi_print_inquiry(&path->device->inq_data);
 1721         if ((bootverbose)
 1722          && (path->device->serial_num_len > 0)) {
 1723                 /* Don't wrap the screen  - print only the first 60 chars */
 1724                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1725                        periph->unit_number, path->device->serial_num);
 1726         }
 1727         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 1728         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 1729         cts.flags = CCB_TRANS_CURRENT_SETTINGS;
 1730         xpt_action((union ccb*)&cts);
 1731         if (cts.ccb_h.status == CAM_REQ_CMP) {
 1732                 u_int speed;
 1733                 u_int freq;
 1734 
 1735                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1736                   && cts.sync_offset != 0) {
 1737                         freq = scsi_calc_syncsrate(cts.sync_period);
 1738                         speed = freq;
 1739                 } else {
 1740                         struct ccb_pathinq cpi;
 1741 
 1742                         /* Ask the SIM for its base transfer speed */
 1743                         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 1744                         cpi.ccb_h.func_code = XPT_PATH_INQ;
 1745                         xpt_action((union ccb *)&cpi);
 1746 
 1747                         speed = cpi.base_transfer_speed;
 1748                         freq = 0;
 1749                 }
 1750                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
 1751                         speed *= (0x01 << cts.bus_width);
 1752                 mb = speed / 1000;
 1753                 if (mb > 0)
 1754                         printf("%s%d: %d.%03dMB/s transfers",
 1755                                periph->periph_name, periph->unit_number,
 1756                                mb, speed % 1000);
 1757                 else
 1758                         printf("%s%d: %dKB/s transfers", periph->periph_name,
 1759                                periph->unit_number, speed);
 1760                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1761                  && cts.sync_offset != 0) {
 1762                         printf(" (%d.%03dMHz, offset %d", freq / 1000,
 1763                                freq % 1000, cts.sync_offset);
 1764                 }
 1765                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
 1766                  && cts.bus_width > 0) {
 1767                         if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1768                          && cts.sync_offset != 0) {
 1769                                 printf(", ");
 1770                         } else {
 1771                                 printf(" (");
 1772                         }
 1773                         printf("%dbit)", 8 * (0x01 << cts.bus_width));
 1774                 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1775                         && cts.sync_offset != 0) {
 1776                         printf(")");
 1777                 }
 1778 
 1779                 if (path->device->inq_flags & SID_CmdQue
 1780                  || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1781                         printf(", Tagged Queueing Enabled");
 1782                 }
 1783 
 1784                 printf("\n");
 1785         } else if (path->device->inq_flags & SID_CmdQue
 1786                 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1787                 printf("%s%d: Tagged Queueing Enabled\n",
 1788                        periph->periph_name, periph->unit_number);
 1789         }
 1790 
 1791         /*
 1792          * We only want to print the caller's announce string if they've
 1793          * passed one in.
 1794          */
 1795         if (announce_string != NULL)
 1796                 printf("%s%d: %s\n", periph->periph_name,
 1797                        periph->unit_number, announce_string);
 1798         splx(s);
 1799 }
 1800 
 1801 #endif /* CAM_NEW_TRAN_CODE */
 1802 
 1803 static dev_match_ret
 1804 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1805             struct cam_eb *bus)
 1806 {
 1807         dev_match_ret retval;
 1808         int i;
 1809 
 1810         retval = DM_RET_NONE;
 1811 
 1812         /*
 1813          * If we aren't given something to match against, that's an error.
 1814          */
 1815         if (bus == NULL)
 1816                 return(DM_RET_ERROR);
 1817 
 1818         /*
 1819          * If there are no match entries, then this bus matches no
 1820          * matter what.
 1821          */
 1822         if ((patterns == NULL) || (num_patterns == 0))
 1823                 return(DM_RET_DESCEND | DM_RET_COPY);
 1824 
 1825         for (i = 0; i < num_patterns; i++) {
 1826                 struct bus_match_pattern *cur_pattern;
 1827 
 1828                 /*
 1829                  * If the pattern in question isn't for a bus node, we
 1830                  * aren't interested.  However, we do indicate to the
 1831                  * calling routine that we should continue descending the
 1832                  * tree, since the user wants to match against lower-level
 1833                  * EDT elements.
 1834                  */
 1835                 if (patterns[i].type != DEV_MATCH_BUS) {
 1836                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1837                                 retval |= DM_RET_DESCEND;
 1838                         continue;
 1839                 }
 1840 
 1841                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1842 
 1843                 /*
 1844                  * If they want to match any bus node, we give them
 1845                  * any bus node.
 1846                  */
 1847                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1848                         /* set the copy flag */
 1849                         retval |= DM_RET_COPY;
 1850 
 1851                         /*
 1852                          * If we've already decided on an action, go ahead
 1853                          * and return.
 1854                          */
 1855                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1856                                 return(retval);
 1857                 }
 1858 
 1859                 /*
 1860                  * Not sure why someone would do this...
 1861                  */
 1862                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1863                         continue;
 1864 
 1865                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1866                  && (cur_pattern->path_id != bus->path_id))
 1867                         continue;
 1868 
 1869                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1870                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1871                         continue;
 1872 
 1873                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1874                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1875                         continue;
 1876 
 1877                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1878                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1879                              DEV_IDLEN) != 0))
 1880                         continue;
 1881 
 1882                 /*
 1883                  * If we get to this point, the user definitely wants 
 1884                  * information on this bus.  So tell the caller to copy the
 1885                  * data out.
 1886                  */
 1887                 retval |= DM_RET_COPY;
 1888 
 1889                 /*
 1890                  * If the return action has been set to descend, then we
 1891                  * know that we've already seen a non-bus matching
 1892                  * expression, therefore we need to further descend the tree.
 1893                  * This won't change by continuing around the loop, so we
 1894                  * go ahead and return.  If we haven't seen a non-bus
 1895                  * matching expression, we keep going around the loop until
 1896                  * we exhaust the matching expressions.  We'll set the stop
 1897                  * flag once we fall out of the loop.
 1898                  */
 1899                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1900                         return(retval);
 1901         }
 1902 
 1903         /*
 1904          * If the return action hasn't been set to descend yet, that means
 1905          * we haven't seen anything other than bus matching patterns.  So
 1906          * tell the caller to stop descending the tree -- the user doesn't
 1907          * want to match against lower level tree elements.
 1908          */
 1909         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1910                 retval |= DM_RET_STOP;
 1911 
 1912         return(retval);
 1913 }
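
/*
 * Worked example of the action/copy protocol above: given two patterns,
 * a peripheral pattern followed by a bus pattern that names this bus,
 * the first iteration sets DM_RET_DESCEND and the second adds
 * DM_RET_COPY, so the caller gets DM_RET_DESCEND | DM_RET_COPY: copy
 * this bus out, then keep descending toward the peripheral level.
 */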
 1914 
 1915 static dev_match_ret
 1916 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1917                struct cam_ed *device)
 1918 {
 1919         dev_match_ret retval;
 1920         int i;
 1921 
 1922         retval = DM_RET_NONE;
 1923 
 1924         /*
 1925          * If we aren't given something to match against, that's an error.
 1926          */
 1927         if (device == NULL)
 1928                 return(DM_RET_ERROR);
 1929 
 1930         /*
 1931          * If there are no match entries, then this device matches no
 1932          * matter what.
 1933          */
 1934         if ((patterns == NULL) || (num_patterns == 0))
 1935                 return(DM_RET_DESCEND | DM_RET_COPY);
 1936 
 1937         for (i = 0; i < num_patterns; i++) {
 1938                 struct device_match_pattern *cur_pattern;
 1939 
 1940                 /*
 1941                  * If the pattern in question isn't for a device node, we
 1942                  * aren't interested.
 1943                  */
 1944                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1945                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1946                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1947                                 retval |= DM_RET_DESCEND;
 1948                         continue;
 1949                 }
 1950 
 1951                 cur_pattern = &patterns[i].pattern.device_pattern;
 1952 
 1953                 /*
 1954                  * If they want to match any device node, we give them any
 1955                  * device node.
 1956                  */
 1957                 if (cur_pattern->flags == DEV_MATCH_ANY) {
 1958                         /* set the copy flag */
 1959                         retval |= DM_RET_COPY;
 1960 
 1962                         /*
 1963                          * If we've already decided on an action, go ahead
 1964                          * and return.
 1965                          */
 1966                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1967                                 return(retval);
 1968                 }
 1969 
 1970                 /*
 1971                  * Not sure why someone would do this...
 1972                  */
 1973                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1974                         continue;
 1975 
 1976                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1977                  && (cur_pattern->path_id != device->target->bus->path_id))
 1978                         continue;
 1979 
 1980                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1981                  && (cur_pattern->target_id != device->target->target_id))
 1982                         continue;
 1983 
 1984                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1985                  && (cur_pattern->target_lun != device->lun_id))
 1986                         continue;
 1987 
 1988                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1989                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1990                                     (caddr_t)&cur_pattern->inq_pat,
 1991                                     1, sizeof(cur_pattern->inq_pat),
 1992                                     scsi_static_inquiry_match) == NULL))
 1993                         continue;
 1994 
 1995                 /*
 1996                  * If we get to this point, the user definitely wants 
 1997                  * information on this device.  So tell the caller to copy
 1998                  * the data out.
 1999                  */
 2000                 retval |= DM_RET_COPY;
 2001 
 2002                 /*
 2003                  * If the return action has been set to descend, then we
 2004                  * know that we've already seen a peripheral matching
 2005                  * expression, therefore we need to further descend the tree.
 2006                  * This won't change by continuing around the loop, so we
 2007                  * go ahead and return.  If we haven't seen a peripheral
 2008                  * matching expression, we keep going around the loop until
 2009                  * we exhaust the matching expressions.  We'll set the stop
 2010                  * flag once we fall out of the loop.
 2011                  */
 2012                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 2013                         return(retval);
 2014         }
 2015 
 2016         /*
 2017          * If the return action hasn't been set to descend yet, that means
 2018          * we haven't seen any peripheral matching patterns.  So tell the
 2019          * caller to stop descending the tree -- the user doesn't want to
 2020          * match against lower level tree elements.
 2021          */
 2022         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 2023                 retval |= DM_RET_STOP;
 2024 
 2025         return(retval);
 2026 }
 2027 
 2028 /*
 2029  * Match a single peripheral against any number of match patterns.
 2030  */
 2031 static dev_match_ret
 2032 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 2033                struct cam_periph *periph)
 2034 {
 2035         dev_match_ret retval;
 2036         int i;
 2037 
 2038         /*
 2039          * If we aren't given something to match against, that's an error.
 2040          */
 2041         if (periph == NULL)
 2042                 return(DM_RET_ERROR);
 2043 
 2044         /*
 2045          * If there are no match entries, then this peripheral matches no
 2046          * matter what.
 2047          */
 2048         if ((patterns == NULL) || (num_patterns == 0))
 2049                 return(DM_RET_STOP | DM_RET_COPY);
 2050 
 2051         /*
 2052          * There aren't any nodes below a peripheral node, so there's no
 2053          * reason to descend the tree any further.
 2054          */
 2055         retval = DM_RET_STOP;
 2056 
 2057         for (i = 0; i < num_patterns; i++) {
 2058                 struct periph_match_pattern *cur_pattern;
 2059 
 2060                 /*
 2061                  * If the pattern in question isn't for a peripheral, we
 2062                  * aren't interested.
 2063                  */
 2064                 if (patterns[i].type != DEV_MATCH_PERIPH)
 2065                         continue;
 2066 
 2067                 cur_pattern = &patterns[i].pattern.periph_pattern;
 2068 
 2069                 /*
 2070                  * If they want to match on anything, then we will do so.
 2071                  */
 2072                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 2073                         /* set the copy flag */
 2074                         retval |= DM_RET_COPY;
 2075 
 2076                         /*
 2077                          * We've already set the return action to stop,
 2078                          * since there are no nodes below peripherals in
 2079                          * the tree.
 2080                          */
 2081                         return(retval);
 2082                 }
 2083 
 2084                 /*
 2085                  * Not sure why someone would do this...
 2086                  */
 2087                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 2088                         continue;
 2089 
 2090                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 2091                  && (cur_pattern->path_id != periph->path->bus->path_id))
 2092                         continue;
 2093 
 2094                 /*
 2095                  * For the target and lun IDs, we have to make sure the
 2096                  * target and lun pointers aren't NULL.  The xpt peripheral
 2097                  * has a wildcard target and device.
 2098                  */
 2099                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 2100                  && ((periph->path->target == NULL)
 2101                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 2102                         continue;
 2103 
 2104                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 2105                  && ((periph->path->device == NULL)
 2106                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 2107                         continue;
 2108 
 2109                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 2110                  && (cur_pattern->unit_number != periph->unit_number))
 2111                         continue;
 2112 
 2113                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 2114                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 2115                              DEV_IDLEN) != 0))
 2116                         continue;
 2117 
 2118                 /*
 2119                  * If we get to this point, the user definitely wants 
 2120                  * information on this peripheral.  So tell the caller to
 2121                  * copy the data out.
 2122                  */
 2123                 retval |= DM_RET_COPY;
 2124 
 2125                 /*
 2126                  * The return action has already been set to stop, since
 2127                  * peripherals don't have any nodes below them in the EDT.
 2128                  */
 2129                 return(retval);
 2130         }
 2131 
 2132         /*
 2133          * If we get to this point, the peripheral that was passed in
 2134          * doesn't match any of the patterns.
 2135          */
 2136         return(retval);
 2137 }
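
/*
 * Illustrative sketch (not part of this file): the kind of pattern a
 * userland caller might hand to these match routines to select every
 * "da" peripheral.  The CCB plumbing around it is a sketch in the style
 * of camcontrol-like consumers, not a verbatim excerpt.
 *
 *      struct dev_match_pattern pat;
 *
 *      bzero(&pat, sizeof(pat));
 *      pat.type = DEV_MATCH_PERIPH;
 *      pat.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *      strlcpy(pat.pattern.periph_pattern.periph_name, "da",
 *              sizeof(pat.pattern.periph_pattern.periph_name));
 *
 *      ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *      ccb.cdm.num_patterns = 1;
 *      ccb.cdm.pattern_buf_len = sizeof(pat);
 *      ccb.cdm.patterns = &pat;
 */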
 2138 
 2139 static int
 2140 xptedtbusfunc(struct cam_eb *bus, void *arg)
 2141 {
 2142         struct ccb_dev_match *cdm;
 2143         dev_match_ret retval;
 2144 
 2145         cdm = (struct ccb_dev_match *)arg;
 2146 
 2147         /*
 2148          * If our position is for something deeper in the tree, that means
 2149          * that we've already seen this node.  So, we keep going down.
 2150          */
 2151         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2152          && (cdm->pos.cookie.bus == bus)
 2153          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2154          && (cdm->pos.cookie.target != NULL))
 2155                 retval = DM_RET_DESCEND;
 2156         else
 2157                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 2158 
 2159         /*
 2160          * If we got an error, bail out of the search.
 2161          */
 2162         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2163                 cdm->status = CAM_DEV_MATCH_ERROR;
 2164                 return(0);
 2165         }
 2166 
 2167         /*
 2168          * If the copy flag is set, copy this bus out.
 2169          */
 2170         if (retval & DM_RET_COPY) {
 2171                 int spaceleft, j;
 2172 
 2173                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2174                         sizeof(struct dev_match_result));
 2175 
 2176                 /*
 2177                  * If we don't have enough space to put in another
 2178                  * match result, save our position and tell the
 2179                  * user there are more devices to check.
 2180                  */
 2181                 if (spaceleft < sizeof(struct dev_match_result)) {
 2182                         bzero(&cdm->pos, sizeof(cdm->pos));
 2183                         cdm->pos.position_type = 
 2184                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 2185 
 2186                         cdm->pos.cookie.bus = bus;
 2187                         cdm->pos.generations[CAM_BUS_GENERATION]=
 2188                                 bus_generation;
 2189                         cdm->status = CAM_DEV_MATCH_MORE;
 2190                         return(0);
 2191                 }
 2192                 j = cdm->num_matches;
 2193                 cdm->num_matches++;
 2194                 cdm->matches[j].type = DEV_MATCH_BUS;
 2195                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 2196                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 2197                 cdm->matches[j].result.bus_result.unit_number =
 2198                         bus->sim->unit_number;
 2199                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 2200                         bus->sim->sim_name, DEV_IDLEN);
 2201         }
 2202 
 2203         /*
 2204          * If the user is only interested in busses, there's no
 2205          * reason to descend to the next level in the tree.
 2206          */
 2207         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2208                 return(1);
 2209 
 2210         /*
 2211          * If there is a target generation recorded, check it to
 2212          * make sure the target list hasn't changed.
 2213          */
 2214         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2215          && (bus == cdm->pos.cookie.bus)
 2216          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2217          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 2218          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 2219              bus->generation)) {
 2220                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2221                 return(0);
 2222         }
 2223 
 2224         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2225          && (cdm->pos.cookie.bus == bus)
 2226          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2227          && (cdm->pos.cookie.target != NULL))
 2228                 return(xpttargettraverse(bus,
 2229                                         (struct cam_et *)cdm->pos.cookie.target,
 2230                                          xptedttargetfunc, arg));
 2231         else
 2232                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 2233 }
 2234 
 2235 static int
 2236 xptedttargetfunc(struct cam_et *target, void *arg)
 2237 {
 2238         struct ccb_dev_match *cdm;
 2239 
 2240         cdm = (struct ccb_dev_match *)arg;
 2241 
 2242         /*
 2243          * If there is a device list generation recorded, check it to
 2244          * make sure the device list hasn't changed.
 2245          */
 2246         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2247          && (cdm->pos.cookie.bus == target->bus)
 2248          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2249          && (cdm->pos.cookie.target == target)
 2250          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2251          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 2252          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 2253              target->generation)) {
 2254                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2255                 return(0);
 2256         }
 2257 
 2258         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2259          && (cdm->pos.cookie.bus == target->bus)
 2260          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2261          && (cdm->pos.cookie.target == target)
 2262          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2263          && (cdm->pos.cookie.device != NULL))
 2264                 return(xptdevicetraverse(target,
 2265                                         (struct cam_ed *)cdm->pos.cookie.device,
 2266                                          xptedtdevicefunc, arg));
 2267         else
 2268                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 2269 }
 2270 
 2271 static int
 2272 xptedtdevicefunc(struct cam_ed *device, void *arg)
 2273 {
 2274 
 2275         struct ccb_dev_match *cdm;
 2276         dev_match_ret retval;
 2277 
 2278         cdm = (struct ccb_dev_match *)arg;
 2279 
 2280         /*
 2281          * If our position is for something deeper in the tree, that means
 2282          * that we've already seen this node.  So, we keep going down.
 2283          */
 2284         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2285          && (cdm->pos.cookie.device == device)
 2286          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2287          && (cdm->pos.cookie.periph != NULL))
 2288                 retval = DM_RET_DESCEND;
 2289         else
 2290                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 2291                                         device);
 2292 
 2293         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2294                 cdm->status = CAM_DEV_MATCH_ERROR;
 2295                 return(0);
 2296         }
 2297 
 2298         /*
 2299          * If the copy flag is set, copy this device out.
 2300          */
 2301         if (retval & DM_RET_COPY) {
 2302                 int spaceleft, j;
 2303 
 2304                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2305                         sizeof(struct dev_match_result));
 2306 
 2307                 /*
 2308                  * If we don't have enough space to put in another
 2309                  * match result, save our position and tell the
 2310                  * user there are more devices to check.
 2311                  */
 2312                 if (spaceleft < sizeof(struct dev_match_result)) {
 2313                         bzero(&cdm->pos, sizeof(cdm->pos));
 2314                         cdm->pos.position_type = 
 2315                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2316                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 2317 
 2318                         cdm->pos.cookie.bus = device->target->bus;
 2319                         cdm->pos.generations[CAM_BUS_GENERATION]=
 2320                                 bus_generation;
 2321                         cdm->pos.cookie.target = device->target;
 2322                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2323                                 device->target->bus->generation;
 2324                         cdm->pos.cookie.device = device;
 2325                         cdm->pos.generations[CAM_DEV_GENERATION] = 
 2326                                 device->target->generation;
 2327                         cdm->status = CAM_DEV_MATCH_MORE;
 2328                         return(0);
 2329                 }
 2330                 j = cdm->num_matches;
 2331                 cdm->num_matches++;
 2332                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 2333                 cdm->matches[j].result.device_result.path_id =
 2334                         device->target->bus->path_id;
 2335                 cdm->matches[j].result.device_result.target_id =
 2336                         device->target->target_id;
 2337                 cdm->matches[j].result.device_result.target_lun =
 2338                         device->lun_id;
 2339                 bcopy(&device->inq_data,
 2340                       &cdm->matches[j].result.device_result.inq_data,
 2341                       sizeof(struct scsi_inquiry_data));
 2342 
 2343                 /* Let the user know whether this device is unconfigured */
 2344                 if (device->flags & CAM_DEV_UNCONFIGURED)
 2345                         cdm->matches[j].result.device_result.flags =
 2346                                 DEV_RESULT_UNCONFIGURED;
 2347                 else
 2348                         cdm->matches[j].result.device_result.flags =
 2349                                 DEV_RESULT_NOFLAG;
 2350         }
 2351 
 2352         /*
 2353          * If the user isn't interested in peripherals, don't descend
 2354          * the tree any further.
 2355          */
 2356         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2357                 return(1);
 2358 
 2359         /*
 2360          * If there is a peripheral list generation recorded, make sure
 2361          * it hasn't changed.
 2362          */
 2363         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2364          && (device->target->bus == cdm->pos.cookie.bus)
 2365          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2366          && (device->target == cdm->pos.cookie.target)
 2367          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2368          && (device == cdm->pos.cookie.device)
 2369          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2370          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2371          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2372              device->generation)){
 2373                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2374                 return(0);
 2375         }
 2376 
 2377         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2378          && (cdm->pos.cookie.bus == device->target->bus)
 2379          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2380          && (cdm->pos.cookie.target == device->target)
 2381          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2382          && (cdm->pos.cookie.device == device)
 2383          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2384          && (cdm->pos.cookie.periph != NULL))
 2385                 return(xptperiphtraverse(device,
 2386                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2387                                 xptedtperiphfunc, arg));
 2388         else
 2389                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 2390 }
 2391 
 2392 static int
 2393 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 2394 {
 2395         struct ccb_dev_match *cdm;
 2396         dev_match_ret retval;
 2397 
 2398         cdm = (struct ccb_dev_match *)arg;
 2399 
 2400         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2401 
 2402         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2403                 cdm->status = CAM_DEV_MATCH_ERROR;
 2404                 return(0);
 2405         }
 2406 
 2407         /*
 2408          * If the copy flag is set, copy this peripheral out.
 2409          */
 2410         if (retval & DM_RET_COPY) {
 2411                 int spaceleft, j;
 2412 
 2413                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2414                         sizeof(struct dev_match_result));
 2415 
 2416                 /*
 2417                  * If we don't have enough space to put in another
 2418                  * match result, save our position and tell the
 2419                  * user there are more devices to check.
 2420                  */
 2421                 if (spaceleft < sizeof(struct dev_match_result)) {
 2422                         bzero(&cdm->pos, sizeof(cdm->pos));
 2423                         cdm->pos.position_type = 
 2424                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2425                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 2426                                 CAM_DEV_POS_PERIPH;
 2427 
 2428                         cdm->pos.cookie.bus = periph->path->bus;
 2429                         cdm->pos.generations[CAM_BUS_GENERATION]=
 2430                                 bus_generation;
 2431                         cdm->pos.cookie.target = periph->path->target;
 2432                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2433                                 periph->path->bus->generation;
 2434                         cdm->pos.cookie.device = periph->path->device;
 2435                         cdm->pos.generations[CAM_DEV_GENERATION] = 
 2436                                 periph->path->target->generation;
 2437                         cdm->pos.cookie.periph = periph;
 2438                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2439                                 periph->path->device->generation;
 2440                         cdm->status = CAM_DEV_MATCH_MORE;
 2441                         return(0);
 2442                 }
 2443 
 2444                 j = cdm->num_matches;
 2445                 cdm->num_matches++;
 2446                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2447                 cdm->matches[j].result.periph_result.path_id =
 2448                         periph->path->bus->path_id;
 2449                 cdm->matches[j].result.periph_result.target_id =
 2450                         periph->path->target->target_id;
 2451                 cdm->matches[j].result.periph_result.target_lun =
 2452                         periph->path->device->lun_id;
 2453                 cdm->matches[j].result.periph_result.unit_number =
 2454                         periph->unit_number;
 2455                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2456                         periph->periph_name, DEV_IDLEN);
 2457         }
 2458 
 2459         return(1);
 2460 }
 2461 
 2462 static int
 2463 xptedtmatch(struct ccb_dev_match *cdm)
 2464 {
 2465         int ret;
 2466 
 2467         cdm->num_matches = 0;
 2468 
 2469         /*
 2470          * Check the bus list generation.  If it has changed, the user
 2471          * needs to reset everything and start over.
 2472          */
 2473         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2474          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 2475          && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
 2476                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2477                 return(0);
 2478         }
 2479 
 2480         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2481          && (cdm->pos.cookie.bus != NULL))
 2482                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 2483                                      xptedtbusfunc, cdm);
 2484         else
 2485                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 2486 
 2487         /*
 2488          * If we get back 0, that means that we had to stop before fully
 2489          * traversing the EDT.  It also means that one of the subroutines
 2490          * has set the status field to the proper value.  If we get back 1,
 2491          * we've fully traversed the EDT and copied out any matching entries.
 2492          */
 2493         if (ret == 1)
 2494                 cdm->status = CAM_DEV_MATCH_LAST;
 2495 
 2496         return(ret);
 2497 }
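
/*
 * Because xptedtmatch() stops with cdm->status == CAM_DEV_MATCH_MORE
 * and a saved cdm->pos once the result buffer fills, a caller can drain
 * the whole EDT iteratively.  A hedged userland sketch follows; the
 * CAMIOCOMMAND ioctl on /dev/xpt0 and the consume() helper are
 * assumptions about the consumer, not part of this file:
 *
 *      do {
 *              if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *                      err(1, "XPT_DEV_MATCH");
 *              for (i = 0; i < ccb.cdm.num_matches; i++)
 *                      consume(&ccb.cdm.matches[i]);
 *      } while (ccb.ccb_h.status == CAM_REQ_CMP
 *            && ccb.cdm.status == CAM_DEV_MATCH_MORE);
 */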
 2498 
 2499 static int
 2500 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 2501 {
 2502         struct ccb_dev_match *cdm;
 2503 
 2504         cdm = (struct ccb_dev_match *)arg;
 2505 
 2506         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2507          && (cdm->pos.cookie.pdrv == pdrv)
 2508          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2509          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2510          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2511              (*pdrv)->generation)) {
 2512                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2513                 return(0);
 2514         }
 2515 
 2516         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2517          && (cdm->pos.cookie.pdrv == pdrv)
 2518          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2519          && (cdm->pos.cookie.periph != NULL))
 2520                 return(xptpdperiphtraverse(pdrv,
 2521                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2522                                 xptplistperiphfunc, arg));
 2523         else
 2524                 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
 2525 }
 2526 
 2527 static int
 2528 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 2529 {
 2530         struct ccb_dev_match *cdm;
 2531         dev_match_ret retval;
 2532 
 2533         cdm = (struct ccb_dev_match *)arg;
 2534 
 2535         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2536 
 2537         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2538                 cdm->status = CAM_DEV_MATCH_ERROR;
 2539                 return(0);
 2540         }
 2541 
 2542         /*
 2543          * If the copy flag is set, copy this peripheral out.
 2544          */
 2545         if (retval & DM_RET_COPY) {
 2546                 int spaceleft, j;
 2547 
 2548                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2549                         sizeof(struct dev_match_result));
 2550 
 2551                 /*
 2552                  * If we don't have enough space to put in another
 2553                  * match result, save our position and tell the
 2554                  * user there are more devices to check.
 2555                  */
 2556                 if (spaceleft < sizeof(struct dev_match_result)) {
 2557                         struct periph_driver **pdrv;
 2558 
 2559                         pdrv = NULL;
 2560                         bzero(&cdm->pos, sizeof(cdm->pos));
 2561                         cdm->pos.position_type = 
 2562                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 2563                                 CAM_DEV_POS_PERIPH;
 2564 
 2565                         /*
 2566                          * This may look a bit nonsensical, but it is
 2567                          * actually quite logical.  There are very few
 2568                          * peripheral drivers, and bloating every peripheral
 2569                          * structure with a pointer back to its parent
 2570                          * peripheral driver linker set entry would cost
 2571                          * more in the long run than doing this quick lookup.
 2572                          */
 2573                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 2574                                 if (strcmp((*pdrv)->driver_name,
 2575                                     periph->periph_name) == 0)
 2576                                         break;
 2577                         }
 2578 
 2579                         if (*pdrv == NULL) {
 2580                                 cdm->status = CAM_DEV_MATCH_ERROR;
 2581                                 return(0);
 2582                         }
 2583 
 2584                         cdm->pos.cookie.pdrv = pdrv;
 2585                         /*
 2586                          * The periph generation slot does double duty, as
 2587                          * does the periph pointer slot.  They are used for
 2588                          * both edt and pdrv lookups and positioning.
 2589                          */
 2590                         cdm->pos.cookie.periph = periph;
 2591                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2592                                 (*pdrv)->generation;
 2593                         cdm->status = CAM_DEV_MATCH_MORE;
 2594                         return(0);
 2595                 }
 2596 
 2597                 j = cdm->num_matches;
 2598                 cdm->num_matches++;
 2599                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2600                 cdm->matches[j].result.periph_result.path_id =
 2601                         periph->path->bus->path_id;
 2602 
 2603                 /*
 2604                  * The transport layer peripheral doesn't have a target or
 2605                  * lun.
 2606                  */
 2607                 if (periph->path->target)
 2608                         cdm->matches[j].result.periph_result.target_id =
 2609                                 periph->path->target->target_id;
 2610                 else
 2611                         cdm->matches[j].result.periph_result.target_id = -1;
 2612 
 2613                 if (periph->path->device)
 2614                         cdm->matches[j].result.periph_result.target_lun =
 2615                                 periph->path->device->lun_id;
 2616                 else
 2617                         cdm->matches[j].result.periph_result.target_lun = -1;
 2618 
 2619                 cdm->matches[j].result.periph_result.unit_number =
 2620                         periph->unit_number;
 2621                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2622                         periph->periph_name, DEV_IDLEN);
 2623         }
 2624 
 2625         return(1);
 2626 }
 2627 
 2628 static int
 2629 xptperiphlistmatch(struct ccb_dev_match *cdm)
 2630 {
 2631         int ret;
 2632 
 2633         cdm->num_matches = 0;
 2634 
 2635         /*
 2636          * At this point in the edt traversal function, we check the bus
 2637          * list generation to make sure that no busses have been added or
 2638          * removed since the user last sent an XPT_DEV_MATCH ccb through.
 2639          * For the peripheral driver list traversal function, however, we
 2640          * don't have to worry about new peripheral driver types coming or
 2641          * going; they're in a linker set, and therefore can't change
 2642          * without a recompile.
 2643          */
 2644 
 2645         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2646          && (cdm->pos.cookie.pdrv != NULL))
 2647                 ret = xptpdrvtraverse(
 2648                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2649                                 xptplistpdrvfunc, cdm);
 2650         else
 2651                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2652 
 2653         /*
 2654          * If we get back 0, that means that we had to stop before fully
 2655          * traversing the peripheral driver list.  It also means that one
 2656          * of the subroutines has set the status field to the proper value.
 2657          * If we get back 1, we've fully traversed the peripheral driver
 2658          * list and copied out any matching entries.
 2659          */
 2660         if (ret == 1)
 2661                 cdm->status = CAM_DEV_MATCH_LAST;
 2662 
 2663         return(ret);
 2664 }
 2665 
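/*
 * The traversal routines below each walk one level of the EDT (busses,
 * targets, devices, peripherals) or the peripheral driver list, calling
 * tr_func on every entry.  A NULL start argument means "begin at the
 * head of the list".  Each routine returns 1 if it visited the whole
 * list, or 0 if tr_func cut the traversal short by returning 0 itself.
 */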
 2666 static int
 2667 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2668 {
 2669         struct cam_eb *bus, *next_bus;
 2670         int retval;
 2671 
 2672         retval = 1;
 2673 
 2674         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
 2675              bus != NULL;
 2676              bus = next_bus) {
 2677                 next_bus = TAILQ_NEXT(bus, links);
 2678 
 2679                 retval = tr_func(bus, arg);
 2680                 if (retval == 0)
 2681                         return(retval);
 2682         }
 2683 
 2684         return(retval);
 2685 }
 2686 
 2687 static int
 2688 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2689                   xpt_targetfunc_t *tr_func, void *arg)
 2690 {
 2691         struct cam_et *target, *next_target;
 2692         int retval;
 2693 
 2694         retval = 1;
 2695         for (target = (start_target ? start_target :
 2696                        TAILQ_FIRST(&bus->et_entries));
 2697              target != NULL; target = next_target) {
 2698 
 2699                 next_target = TAILQ_NEXT(target, links);
 2700 
 2701                 retval = tr_func(target, arg);
 2702 
 2703                 if (retval == 0)
 2704                         return(retval);
 2705         }
 2706 
 2707         return(retval);
 2708 }
 2709 
 2710 static int
 2711 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2712                   xpt_devicefunc_t *tr_func, void *arg)
 2713 {
 2714         struct cam_ed *device, *next_device;
 2715         int retval;
 2716 
 2717         retval = 1;
 2718         for (device = (start_device ? start_device :
 2719                        TAILQ_FIRST(&target->ed_entries));
 2720              device != NULL;
 2721              device = next_device) {
 2722 
 2723                 next_device = TAILQ_NEXT(device, links);
 2724 
 2725                 retval = tr_func(device, arg);
 2726 
 2727                 if (retval == 0)
 2728                         return(retval);
 2729         }
 2730 
 2731         return(retval);
 2732 }
 2733 
 2734 static int
 2735 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2736                   xpt_periphfunc_t *tr_func, void *arg)
 2737 {
 2738         struct cam_periph *periph, *next_periph;
 2739         int retval;
 2740 
 2741         retval = 1;
 2742 
 2743         for (periph = (start_periph ? start_periph :
 2744                        SLIST_FIRST(&device->periphs));
 2745              periph != NULL;
 2746              periph = next_periph) {
 2747 
 2748                 next_periph = SLIST_NEXT(periph, periph_links);
 2749 
 2750                 retval = tr_func(periph, arg);
 2751                 if (retval == 0)
 2752                         return(retval);
 2753         }
 2754 
 2755         return(retval);
 2756 }
 2757 
 2758 static int
 2759 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2760                 xpt_pdrvfunc_t *tr_func, void *arg)
 2761 {
 2762         struct periph_driver **pdrv;
 2763         int retval;
 2764 
 2765         retval = 1;
 2766 
 2767         /*
 2768          * We don't traverse the peripheral driver list like we do the
 2769          * other lists, because it is a linker set, and therefore cannot be
 2770          * changed during runtime.  If the peripheral driver list is ever
 2771          * re-done to be something other than a linker set (i.e. it can
 2772          * change while the system is running), the list traversal should
 2773          * be modified to work like the other traversal functions.
 2774          */
 2775         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2776              *pdrv != NULL; pdrv++) {
 2777                 retval = tr_func(pdrv, arg);
 2778 
 2779                 if (retval == 0)
 2780                         return(retval);
 2781         }
 2782 
 2783         return(retval);
 2784 }
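
/*
 * Example sketch: peripheral drivers land in the periph_drivers linker
 * set via the PERIPHDRIVER_DECLARE() macro from <cam/cam_periph.h>.
 * The "foo" driver below is purely hypothetical, shown only to
 * illustrate how an entry in the set walked above comes to exist.
 */
#if 0
static struct periph_driver foodriver =
{
        fooinit,                /* driver initialization routine */
        "foo",                  /* driver_name, matched by name lookups */
        TAILQ_HEAD_INITIALIZER(foodriver.units),
        /* generation */ 0
};

PERIPHDRIVER_DECLARE(foo, foodriver);
#endif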
 2785 
 2786 static int
 2787 xptpdperiphtraverse(struct periph_driver **pdrv,
 2788                     struct cam_periph *start_periph,
 2789                     xpt_periphfunc_t *tr_func, void *arg)
 2790 {
 2791         struct cam_periph *periph, *next_periph;
 2792         int retval;
 2793 
 2794         retval = 1;
 2795 
 2796         for (periph = (start_periph ? start_periph :
 2797              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2798              periph = next_periph) {
 2799 
 2800                 next_periph = TAILQ_NEXT(periph, unit_links);
 2801 
 2802                 retval = tr_func(periph, arg);
 2803                 if (retval == 0)
 2804                         return(retval);
 2805         }
 2806         return(retval);
 2807 }
 2808 
 2809 static int
 2810 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2811 {
 2812         struct xpt_traverse_config *tr_config;
 2813 
 2814         tr_config = (struct xpt_traverse_config *)arg;
 2815 
 2816         if (tr_config->depth == XPT_DEPTH_BUS) {
 2817                 xpt_busfunc_t *tr_func;
 2818 
 2819                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2820 
 2821                 return(tr_func(bus, tr_config->tr_arg));
 2822         } else
 2823                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2824 }
 2825 
 2826 static int
 2827 xptdeftargetfunc(struct cam_et *target, void *arg)
 2828 {
 2829         struct xpt_traverse_config *tr_config;
 2830 
 2831         tr_config = (struct xpt_traverse_config *)arg;
 2832 
 2833         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2834                 xpt_targetfunc_t *tr_func;
 2835 
 2836                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2837 
 2838                 return(tr_func(target, tr_config->tr_arg));
 2839         } else
 2840                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2841 }
 2842 
 2843 static int
 2844 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2845 {
 2846         struct xpt_traverse_config *tr_config;
 2847 
 2848         tr_config = (struct xpt_traverse_config *)arg;
 2849 
 2850         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2851                 xpt_devicefunc_t *tr_func;
 2852 
 2853                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2854 
 2855                 return(tr_func(device, tr_config->tr_arg));
 2856         } else
 2857                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2858 }
 2859 
 2860 static int
 2861 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2862 {
 2863         struct xpt_traverse_config *tr_config;
 2864         xpt_periphfunc_t *tr_func;
 2865 
 2866         tr_config = (struct xpt_traverse_config *)arg;
 2867 
 2868         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2869 
 2870         /*
 2871          * Unlike the other default functions, we don't check for depth
 2872          * here.  The peripheral driver level is the last level in the EDT,
 2873          * so if we're here, we should execute the function in question.
 2874          */
 2875         return(tr_func(periph, tr_config->tr_arg));
 2876 }
 2877 
 2878 /*
 2879  * Execute the given function for every bus in the EDT.
 2880  */
 2881 static int
 2882 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2883 {
 2884         struct xpt_traverse_config tr_config;
 2885 
 2886         tr_config.depth = XPT_DEPTH_BUS;
 2887         tr_config.tr_func = tr_func;
 2888         tr_config.tr_arg = arg;
 2889 
 2890         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2891 }
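
/*
 * Example sketch: a minimal xpt_busfunc_t callback for use with
 * xpt_for_all_busses().  Returning 1 continues the traversal; returning
 * 0 stops it, as described above.  The counting callback and its caller
 * are hypothetical.
 */
#if 0
static int
xptcountbusfunc(struct cam_eb *bus, void *arg)
{
        int *count;

        count = (int *)arg;
        (*count)++;
        return(1);              /* keep walking the bus list */
}

static void
xptcountbusses(void)
{
        int nbusses;

        nbusses = 0;
        xpt_for_all_busses(xptcountbusfunc, &nbusses);
        printf("%d busses found\n", nbusses);
}
#endif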
 2892 
 2893 #ifdef notusedyet
 2894 /*
 2895  * Execute the given function for every target in the EDT.
 2896  */
 2897 static int
 2898 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
 2899 {
 2900         struct xpt_traverse_config tr_config;
 2901 
 2902         tr_config.depth = XPT_DEPTH_TARGET;
 2903         tr_config.tr_func = tr_func;
 2904         tr_config.tr_arg = arg;
 2905 
 2906         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2907 }
 2908 #endif /* notusedyet */
 2909 
 2910 /*
 2911  * Execute the given function for every device in the EDT.
 2912  */
 2913 static int
 2914 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2915 {
 2916         struct xpt_traverse_config tr_config;
 2917 
 2918         tr_config.depth = XPT_DEPTH_DEVICE;
 2919         tr_config.tr_func = tr_func;
 2920         tr_config.tr_arg = arg;
 2921 
 2922         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2923 }
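
/*
 * For an in-tree example of this mechanism, see the XPT_SASYNC_CB case
 * in xpt_action() below: it uses xpt_for_all_devices() with
 * xptsetasyncfunc to replay AC_FOUND_DEVICE events for devices that
 * already exist.
 */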
 2924 
 2925 #ifdef notusedyet
 2926 /*
 2927  * Execute the given function for every peripheral in the EDT.
 2928  */
 2929 static int
 2930 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
 2931 {
 2932         struct xpt_traverse_config tr_config;
 2933 
 2934         tr_config.depth = XPT_DEPTH_PERIPH;
 2935         tr_config.tr_func = tr_func;
 2936         tr_config.tr_arg = arg;
 2937 
 2938         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2939 }
 2940 #endif /* notusedyet */
 2941 
 2942 static int
 2943 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2944 {
 2945         struct cam_path path;
 2946         struct ccb_getdev cgd;
 2947         struct async_node *cur_entry;
 2948 
 2949         cur_entry = (struct async_node *)arg;
 2950 
 2951         /*
 2952          * Don't report unconfigured devices (Wildcard devs,
 2953          * devices only for target mode, device instances
 2954          * that have been invalidated but are waiting for
 2955          * their last reference count to be released).
 2956          */
 2957         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2958                 return (1);
 2959 
 2960         xpt_compile_path(&path,
 2961                          NULL,
 2962                          device->target->bus->path_id,
 2963                          device->target->target_id,
 2964                          device->lun_id);
 2965         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
 2966         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2967         xpt_action((union ccb *)&cgd);
 2968         cur_entry->callback(cur_entry->callback_arg,
 2969                             AC_FOUND_DEVICE,
 2970                             &path, &cgd);
 2971         xpt_release_path(&path);
 2972 
 2973         return(1);
 2974 }
 2975 
 2976 static int
 2977 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2978 {
 2979         struct cam_path path;
 2980         struct ccb_pathinq cpi;
 2981         struct async_node *cur_entry;
 2982 
 2983         cur_entry = (struct async_node *)arg;
 2984 
 2985         xpt_compile_path(&path, /*periph*/NULL,
 2986                          bus->sim->path_id,
 2987                          CAM_TARGET_WILDCARD,
 2988                          CAM_LUN_WILDCARD);
 2989         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 2990         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2991         xpt_action((union ccb *)&cpi);
 2992         cur_entry->callback(cur_entry->callback_arg,
 2993                             AC_PATH_REGISTERED,
 2994                             &path, &cpi);
 2995         xpt_release_path(&path);
 2996 
 2997         return(1);
 2998 }
 2999 
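/*
 * xpt_action() is the main entry point into the transport layer.  It
 * dispatches on the ccb's func_code: requests that the XPT can satisfy
 * by itself (device matching, statistics, async callback registration,
 * and so on) are completed here, while I/O and other SIM-specific
 * requests are queued or passed through to the SIM's action routine.
 */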
 3000 void
 3001 xpt_action(union ccb *start_ccb)
 3002 {
 3003         int iopl;
 3004 
 3005         GIANT_REQUIRED;
 3006 
 3007         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 3008 
 3009         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 3010 
 3011         iopl = splsoftcam();
 3012         switch (start_ccb->ccb_h.func_code) {
 3013         case XPT_SCSI_IO:
 3014         {
 3015 #ifdef CAM_NEW_TRAN_CODE
 3016                 struct cam_ed *device;
 3017 #endif /* CAM_NEW_TRAN_CODE */
 3018 #ifdef CAMDEBUG
 3019                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 3020                 struct cam_path *path;
 3021 
 3022                 path = start_ccb->ccb_h.path;
 3023 #endif
 3024 
 3025                 /*
 3026                  * For the sake of compatibility with SCSI-1
 3027                  * devices that may not understand the identify
 3028                  * message, we include lun information in the
 3029                  * second byte of all commands.  SCSI-1 specifies
 3030                  * that luns are a 3-bit value and reserves only 3
 3031                  * bits for lun information in the CDB.  Later
 3032                  * revisions of the SCSI spec allow for more than 8
 3033                  * luns, but have deprecated lun information in the
 3034                  * CDB.  So, if the lun won't fit, we must omit it.
 3035                  *
 3036                  * Also be aware that during initial probing for devices,
 3037                  * the inquiry information is unknown but initialized to 0.
 3038                  * This means that this code will be exercised while probing
 3039                  * devices with an ANSI revision greater than 2.
 3040                  */
 3041 #ifdef CAM_NEW_TRAN_CODE
 3042                 device = start_ccb->ccb_h.path->device;
 3043                 if (device->protocol_version <= SCSI_REV_2
 3044 #else /* CAM_NEW_TRAN_CODE */
 3045                 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
 3046 #endif /* CAM_NEW_TRAN_CODE */
 3047                  && start_ccb->ccb_h.target_lun < 8
 3048                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 3049 
 3050                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 3051                             start_ccb->ccb_h.target_lun << 5;
 3052                 }
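                /*
                 * For example, lun 2 on such a device yields (2 << 5) ==
                 * 0x40 OR'd into cdb_bytes[1], placing the lun in the top
                 * three bits of the byte, as SCSI-1 expects.
                 */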
 3053                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 3054                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
 3055                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
 3056                                        &path->device->inq_data),
 3057                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
 3058                                           cdb_str, sizeof(cdb_str))));
 3059         }
 3060         /* FALLTHROUGH */
 3061         case XPT_TARGET_IO:
 3062         case XPT_CONT_TARGET_IO:
 3063                 start_ccb->csio.sense_resid = 0;
 3064                 start_ccb->csio.resid = 0;
 3065                 /* FALLTHROUGH */
 3066         case XPT_RESET_DEV:
 3067         case XPT_ENG_EXEC:
 3068         {
 3069                 struct cam_path *path;
 3070                 struct cam_sim *sim;
 3071                 int s;
 3072                 int runq;
 3073 
 3074                 path = start_ccb->ccb_h.path;
 3075                 s = splsoftcam();
 3076 
 3077                 sim = path->bus->sim;
 3078                 if (SIM_DEAD(sim)) {
 3079                         /* The SIM has gone; just execute the CCB directly. */
 3080                         cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
 3081                         (*(sim->sim_action))(sim, start_ccb);
 3082                         splx(s);
 3083                         break;
 3084                 }
 3085 
 3086                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 3087                 if (path->device->qfrozen_cnt == 0)
 3088                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
 3089                 else
 3090                         runq = 0;
 3091                 splx(s);
 3092                 if (runq != 0)
 3093                         xpt_run_dev_sendq(path->bus);
 3094                 break;
 3095         }
 3096         case XPT_SET_TRAN_SETTINGS:
 3097         {
 3098                 xpt_set_transfer_settings(&start_ccb->cts,
 3099                                           start_ccb->ccb_h.path->device,
 3100                                           /*async_update*/FALSE);
 3101                 break;
 3102         }
 3103         case XPT_CALC_GEOMETRY:
 3104         {
 3105                 struct cam_sim *sim;
 3106 
 3107                 /* Filter out garbage */
 3108                 if (start_ccb->ccg.block_size == 0
 3109                  || start_ccb->ccg.volume_size == 0) {
 3110                         start_ccb->ccg.cylinders = 0;
 3111                         start_ccb->ccg.heads = 0;
 3112                         start_ccb->ccg.secs_per_track = 0;
 3113                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3114                         break;
 3115                 }
 3116 #ifdef PC98
 3117                 /*
 3118                  * In a PC-98 system, geometry translation depends on
 3119                  * the "real" device geometry obtained from mode page 4.
 3120                  * SCSI geometry translation is performed in the
 3121                  * initialization routine of the SCSI BIOS and the result
 3122                  * is stored in host memory.  If the translation is
 3123                  * available in host memory, use it.  If not, rely on the
 3124                  * default translation the device driver performs.
 3125                  */
 3126                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 3127                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3128                         break;
 3129                 }
 3130 #endif
 3131                 sim = start_ccb->ccb_h.path->bus->sim;
 3132                 (*(sim->sim_action))(sim, start_ccb);
 3133                 break;
 3134         }
 3135         case XPT_ABORT:
 3136         {
 3137                 union ccb* abort_ccb;
 3138                 int s;                          
 3139 
 3140                 abort_ccb = start_ccb->cab.abort_ccb;
 3141                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 3142 
 3143                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 3144                                 struct cam_ccbq *ccbq;
 3145 
 3146                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
 3147                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
 3148                                 abort_ccb->ccb_h.status =
 3149                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 3150                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 3151                                 s = splcam();
 3152                                 xpt_done(abort_ccb);
 3153                                 splx(s);
 3154                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3155                                 break;
 3156                         }
 3157                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 3158                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 3159                                 /*
 3160                                  * We've caught this ccb en route to
 3161                                  * the SIM.  Flag it for abort and the
 3162                                  * SIM will do so just before starting
 3163                                  * real work on the CCB.
 3164                                  */
 3165                                 abort_ccb->ccb_h.status =
 3166                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 3167                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 3168                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3169                                 break;
 3170                         }
 3171                 } 
 3172                 if (XPT_FC_IS_QUEUED(abort_ccb)
 3173                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 3174                         /*
 3175                          * It's already completed but waiting
 3176                          * for our SWI to get to it.
 3177                          */
 3178                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 3179                         break;
 3180                 }
 3181                 /*
 3182                  * If we weren't able to take care of the abort request
 3183                  * in the XPT, pass the request down to the SIM for processing.
 3184                  */
 3185         }
 3186         /* FALLTHROUGH */
 3187         case XPT_ACCEPT_TARGET_IO:
 3188         case XPT_EN_LUN:
 3189         case XPT_IMMED_NOTIFY:
 3190         case XPT_NOTIFY_ACK:
 3191         case XPT_GET_TRAN_SETTINGS:
 3192         case XPT_RESET_BUS:
 3193         {
 3194                 struct cam_sim *sim;
 3195 
 3196                 sim = start_ccb->ccb_h.path->bus->sim;
 3197                 (*(sim->sim_action))(sim, start_ccb);
 3198                 break;
 3199         }
 3200         case XPT_PATH_INQ:
 3201         {
 3202                 struct cam_sim *sim;
 3203 
 3204                 sim = start_ccb->ccb_h.path->bus->sim;
 3205                 (*(sim->sim_action))(sim, start_ccb);
 3206                 break;
 3207         }
 3208         case XPT_PATH_STATS:
 3209                 start_ccb->cpis.last_reset =
 3210                         start_ccb->ccb_h.path->bus->last_reset;
 3211                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3212                 break;
 3213         case XPT_GDEV_TYPE:
 3214         {
 3215                 struct cam_ed *dev;
 3216                 int s;
 3217 
 3218                 dev = start_ccb->ccb_h.path->device;
 3219                 s = splcam();
 3220                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3221                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3222                 } else {
 3223                         struct ccb_getdev *cgd;
 3224                         struct cam_eb *bus;
 3225                         struct cam_et *tar;
 3226 
 3227                         cgd = &start_ccb->cgd;
 3228                         bus = cgd->ccb_h.path->bus;
 3229                         tar = cgd->ccb_h.path->target;
 3230                         cgd->inq_data = dev->inq_data;
 3231                         cgd->ccb_h.status = CAM_REQ_CMP;
 3232                         cgd->serial_num_len = dev->serial_num_len;
 3233                         if ((dev->serial_num_len > 0)
 3234                          && (dev->serial_num != NULL))
 3235                                 bcopy(dev->serial_num, cgd->serial_num,
 3236                                       dev->serial_num_len);
 3237                 }
 3238                 splx(s);
 3239                 break; 
 3240         }
 3241         case XPT_GDEV_STATS:
 3242         {
 3243                 struct cam_ed *dev;
 3244                 int s;
 3245 
 3246                 dev = start_ccb->ccb_h.path->device;
 3247                 s = splcam();
 3248                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3249                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3250                 } else {
 3251                         struct ccb_getdevstats *cgds;
 3252                         struct cam_eb *bus;
 3253                         struct cam_et *tar;
 3254 
 3255                         cgds = &start_ccb->cgds;
 3256                         bus = cgds->ccb_h.path->bus;
 3257                         tar = cgds->ccb_h.path->target;
 3258                         cgds->dev_openings = dev->ccbq.dev_openings;
 3259                         cgds->dev_active = dev->ccbq.dev_active;
 3260                         cgds->devq_openings = dev->ccbq.devq_openings;
 3261                         cgds->devq_queued = dev->ccbq.queue.entries;
 3262                         cgds->held = dev->ccbq.held;
 3263                         cgds->last_reset = tar->last_reset;
 3264                         cgds->maxtags = dev->quirk->maxtags;
 3265                         cgds->mintags = dev->quirk->mintags;
 3266                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 3267                                 cgds->last_reset = bus->last_reset;
 3268                         cgds->ccb_h.status = CAM_REQ_CMP;
 3269                 }
 3270                 splx(s);
 3271                 break;
 3272         }
 3273         case XPT_GDEVLIST:
 3274         {
 3275                 struct cam_periph       *nperiph;
 3276                 struct periph_list      *periph_head;
 3277                 struct ccb_getdevlist   *cgdl;
 3278                 u_int                   i;
 3279                 int                     s;
 3280                 struct cam_ed           *device;
 3281                 int                     found;
 3282 
 3283 
 3284                 found = 0;
 3285 
 3286                 /*
 3287                  * Don't want anyone mucking with our data.
 3288                  */
 3289                 s = splcam();
 3290                 device = start_ccb->ccb_h.path->device;
 3291                 periph_head = &device->periphs;
 3292                 cgdl = &start_ccb->cgdl;
 3293 
 3294                 /*
 3295                  * Check and see if the list has changed since the user
 3296                  * last requested a list member.  If so, tell them that the
 3297                  * list has changed, and therefore they need to start over 
 3298                  * from the beginning.
 3299                  */
 3300                 if ((cgdl->index != 0) && 
 3301                     (cgdl->generation != device->generation)) {
 3302                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 3303                         splx(s);
 3304                         break;
 3305                 }
 3306 
 3307                 /*
 3308                  * Traverse the list of peripherals and attempt to find 
 3309                  * the requested peripheral.
 3310                  */
 3311                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 3312                      (nperiph != NULL) && (i <= cgdl->index);
 3313                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 3314                         if (i == cgdl->index) {
 3315                                 strncpy(cgdl->periph_name,
 3316                                         nperiph->periph_name,
 3317                                         DEV_IDLEN);
 3318                                 cgdl->unit_number = nperiph->unit_number;
 3319                                 found = 1;
 3320                         }
 3321                 }
 3322                 if (found == 0) {
 3323                         cgdl->status = CAM_GDEVLIST_ERROR;
 3324                         splx(s);
 3325                         break;
 3326                 }
 3327 
 3328                 if (nperiph == NULL)
 3329                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 3330                 else
 3331                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 3332 
 3333                 cgdl->index++;
 3334                 cgdl->generation = device->generation;
 3335 
 3336                 splx(s);
 3337                 cgdl->ccb_h.status = CAM_REQ_CMP;
 3338                 break;
 3339         }
 3340         case XPT_DEV_MATCH:
 3341         {
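                /*
                 * Userland typically reaches this case through the xpt(4)
                 * pass-through device; "camcontrol devlist", for instance,
                 * builds its output from XPT_DEV_MATCH results.
                 */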
 3342                 int s;
 3343                 dev_pos_type position_type;
 3344                 struct ccb_dev_match *cdm;
 3345 
 3346                 cdm = &start_ccb->cdm;
 3347 
 3348                 /*
 3349                  * Prevent EDT changes while we traverse it.
 3350                  */
 3351                 s = splcam();
 3352                 /*
 3353                  * There are two ways of getting at information in the EDT.
 3354                  * The first way is via the primary EDT tree.  It starts
 3355                  * with a list of busses, then a list of targets on a bus,
 3356                  * then devices/luns on a target, and then peripherals on a
 3357                  * device/lun.  The "other" way is by the peripheral driver
 3358                  * lists.  The peripheral driver lists are organized by
 3359                  * peripheral driver (obviously), so it makes sense to
 3360                  * use the peripheral driver list if the user is looking
 3361                  * for something like "da1", or all "da" devices.  If the
 3362                  * user is looking for something on a particular bus/target
 3363                  * or lun, it's generally better to go through the EDT tree.
 3364                  */
 3365 
 3366                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 3367                         position_type = cdm->pos.position_type;
 3368                 else {
 3369                         u_int i;
 3370 
 3371                         position_type = CAM_DEV_POS_NONE;
 3372 
 3373                         for (i = 0; i < cdm->num_patterns; i++) {
 3374                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 3375                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 3376                                         position_type = CAM_DEV_POS_EDT;
 3377                                         break;
 3378                                 }
 3379                         }
 3380 
 3381                         if (cdm->num_patterns == 0)
 3382                                 position_type = CAM_DEV_POS_EDT;
 3383                         else if (position_type == CAM_DEV_POS_NONE)
 3384                                 position_type = CAM_DEV_POS_PDRV;
 3385                 }
 3386 
 3387                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
 3388                 case CAM_DEV_POS_EDT:
 3389                         xptedtmatch(cdm);
 3390                         break;
 3391                 case CAM_DEV_POS_PDRV:
 3392                         xptperiphlistmatch(cdm);
 3393                         break;
 3394                 default:
 3395                         cdm->status = CAM_DEV_MATCH_ERROR;
 3396                         break;
 3397                 }
 3398 
 3399                 splx(s);
 3400 
 3401                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 3402                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 3403                 else
 3404                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3405 
 3406                 break;
 3407         }
 3408         case XPT_SASYNC_CB:
 3409         {
 3410                 struct ccb_setasync *csa;
 3411                 struct async_node *cur_entry;
 3412                 struct async_list *async_head;
 3413                 u_int32_t added;
 3414                 int s;
 3415 
 3416                 csa = &start_ccb->csa;
 3417                 added = csa->event_enable;
 3418                 async_head = &csa->ccb_h.path->device->asyncs;
 3419 
 3420                 /*
 3421                  * If there is already an entry for us, simply
 3422                  * update it.
 3423                  */
 3424                 s = splcam();
 3425                 cur_entry = SLIST_FIRST(async_head);
 3426                 while (cur_entry != NULL) {
 3427                         if ((cur_entry->callback_arg == csa->callback_arg)
 3428                          && (cur_entry->callback == csa->callback))
 3429                                 break;
 3430                         cur_entry = SLIST_NEXT(cur_entry, links);
 3431                 }
 3432 
 3433                 if (cur_entry != NULL) {
 3434                         /*
 3435                          * If the request has no flags set,
 3436                          * remove the entry.
 3437                          */
 3438                         added &= ~cur_entry->event_enable;
 3439                         if (csa->event_enable == 0) {
 3440                                 SLIST_REMOVE(async_head, cur_entry,
 3441                                              async_node, links);
 3442                                 csa->ccb_h.path->device->refcount--;
 3443                                 free(cur_entry, M_CAMXPT);
 3444                         } else {
 3445                                 cur_entry->event_enable = csa->event_enable;
 3446                         }
 3447                 } else {
 3448                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 3449                                            M_NOWAIT);
 3450                         if (cur_entry == NULL) {
 3451                                 splx(s);
 3452                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 3453                                 break;
 3454                         }
 3455                         cur_entry->event_enable = csa->event_enable;
 3456                         cur_entry->callback_arg = csa->callback_arg;
 3457                         cur_entry->callback = csa->callback;
 3458                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 3459                         csa->ccb_h.path->device->refcount++;
 3460                 }
 3461 
 3462                 if ((added & AC_FOUND_DEVICE) != 0) {
 3463                         /*
 3464                          * Get this peripheral up to date with all
 3465                          * the currently existing devices.
 3466                          */
 3467                         xpt_for_all_devices(xptsetasyncfunc, cur_entry);
 3468                 }
 3469                 if ((added & AC_PATH_REGISTERED) != 0) {
 3470                         /*
 3471                          * Get this peripheral up to date with all
 3472                          * the currently existing busses.
 3473                          */
 3474                         xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
 3475                 }
 3476                 splx(s);
 3477                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3478                 break;
 3479         }
 3480         case XPT_REL_SIMQ:
 3481         {
 3482                 struct ccb_relsim *crs;
 3483                 struct cam_ed *dev;
 3484                 int s;
 3485 
 3486                 crs = &start_ccb->crs;
 3487                 dev = crs->ccb_h.path->device;
 3488                 if (dev == NULL) {
 3489 
 3490                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 3491                         break;
 3492                 }
 3493 
 3494                 s = splcam();
 3495 
 3496                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 3497 
 3498                         if ((dev->inq_data.flags & SID_CmdQue) != 0) {
 3499 
 3500                                 /* Don't ever go below one opening */
 3501                                 if (crs->openings > 0) {
 3502                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
 3503                                                             crs->openings);
 3504 
 3505                                         if (bootverbose) {
 3506                                                 xpt_print_path(crs->ccb_h.path);
 3507                                                 printf("tagged openings "
 3508                                                        "now %d\n",
 3509                                                        crs->openings);
 3510                                         }
 3511                                 }
 3512                         }
 3513                 }
 3514 
 3515                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 3516 
 3517                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 3518 
 3519                                 /*
 3520                                  * Just extend the old timeout and decrement
 3521                                  * the freeze count so that a single timeout
 3522                                  * is sufficient for releasing the queue.
 3523                                  */
 3524                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3525                                 untimeout(xpt_release_devq_timeout,
 3526                                           dev, dev->c_handle);
 3527                         } else {
 3528 
 3529                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3530                         }
 3531 
 3532                         dev->c_handle =
 3533                                 timeout(xpt_release_devq_timeout,
 3534                                         dev,
 3535                                         (crs->release_timeout * hz) / 1000);
 3536 
 3537                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 3538 
 3539                 }
 3540 
 3541                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 3542 
 3543                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 3544                                 /*
 3545                                  * Decrement the freeze count so that a single
 3546                                  * completion is still sufficient to unfreeze
 3547                                  * the queue.
 3548                                  */
 3549                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3550                         } else {
 3551                                 
 3552                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 3553                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3554                         }
 3555                 }
 3556 
 3557                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 3558 
 3559                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 3560                          || (dev->ccbq.dev_active == 0)) {
 3561 
 3562                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3563                         } else {
 3564                                 
 3565                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 3566                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3567                         }
 3568                 }
 3569                 splx(s);
 3570                 
 3571                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 3572 
 3573                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
 3574                                          /*run_queue*/TRUE);
 3575                 }
 3576                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
 3577                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3578                 break;
 3579         }
 3580         case XPT_SCAN_BUS:
 3581                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
 3582                 break;
 3583         case XPT_SCAN_LUN:
 3584                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
 3585                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
 3586                              start_ccb);
 3587                 break;
 3588         case XPT_DEBUG: {
 3589 #ifdef CAMDEBUG
 3590                 int s;
 3591                 
 3592                 s = splcam();
 3593 #ifdef CAM_DEBUG_DELAY
 3594                 cam_debug_delay = CAM_DEBUG_DELAY;
 3595 #endif
 3596                 cam_dflags = start_ccb->cdbg.flags;
 3597                 if (cam_dpath != NULL) {
 3598                         xpt_free_path(cam_dpath);
 3599                         cam_dpath = NULL;
 3600                 }
 3601 
 3602                 if (cam_dflags != CAM_DEBUG_NONE) {
 3603                         if (xpt_create_path(&cam_dpath, xpt_periph,
 3604                                             start_ccb->ccb_h.path_id,
 3605                                             start_ccb->ccb_h.target_id,
 3606                                             start_ccb->ccb_h.target_lun) !=
 3607                                             CAM_REQ_CMP) {
 3608                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3609                                 cam_dflags = CAM_DEBUG_NONE;
 3610                         } else {
 3611                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3612                                 xpt_print_path(cam_dpath);
 3613                                 printf("debugging flags now %x\n", cam_dflags);
 3614                         }
 3615                 } else {
 3616                         cam_dpath = NULL;
 3617                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3618                 }
 3619                 splx(s);
 3620 #else /* !CAMDEBUG */
 3621                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 3622 #endif /* CAMDEBUG */
 3623                 break;
 3624         }
 3625         case XPT_NOOP:
 3626                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 3627                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
 3628                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3629                 break;
 3630         default:
 3631         case XPT_SDEV_TYPE:
 3632         case XPT_TERM_IO:
 3633         case XPT_ENG_INQ:
 3634                 /* XXX Implement */
 3635                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 3636                 break;
 3637         }
 3638         splx(iopl);
 3639 }
 3640 
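/*
 * Poll for ccb completion without relying on interrupts: steal a device
 * opening so no other queued request can claim it, then call the SIM's
 * poll routine and camisr() by hand, one millisecond per iteration,
 * until the ccb completes or its ccb_h.timeout expires.  This is what
 * keeps I/O possible in interrupt-less contexts such as crash dumps.
 */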
 3641 void
 3642 xpt_polled_action(union ccb *start_ccb)
 3643 {
 3644         int       s;
 3645         u_int32_t timeout;
 3646         struct    cam_sim *sim; 
 3647         struct    cam_devq *devq;
 3648         struct    cam_ed *dev;
 3649 
 3650         GIANT_REQUIRED;
 3651 
 3652         timeout = start_ccb->ccb_h.timeout;
 3653         sim = start_ccb->ccb_h.path->bus->sim;
 3654         devq = sim->devq;
 3655         dev = start_ccb->ccb_h.path->device;
 3656 
 3657         s = splcam();
 3658 
 3659         /*
 3660          * Steal an opening so that no other queued requests
 3661          * can get it before us while we simulate interrupts.
 3662          */
 3663         dev->ccbq.devq_openings--;
 3664         dev->ccbq.dev_openings--;       
 3665         
 3666         while(((devq != NULL && devq->send_openings <= 0) ||
 3667            dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
 3668                 DELAY(1000);
 3669                 (*(sim->sim_poll))(sim);
 3670                 camisr(&cam_bioq);
 3671         }
 3672         
 3673         dev->ccbq.devq_openings++;
 3674         dev->ccbq.dev_openings++;
 3675         
 3676         if (timeout != 0) {
 3677                 xpt_action(start_ccb);
 3678                 while(--timeout > 0) {
 3679                         (*(sim->sim_poll))(sim);
 3680                         camisr(&cam_bioq);
 3681                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 3682                             != CAM_REQ_INPROG)
 3683                                 break;
 3684                         DELAY(1000);
 3685                 }
 3686                 if (timeout == 0) {
 3687                         /*
 3688                          * XXX Is it worth adding a sim_timeout entry
 3689                          * point so we can attempt recovery?  If
 3690                          * this is only used for dumps, I don't think
 3691                          * it is.
 3692                          */
 3693                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3694                 }
 3695         } else {
 3696                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3697         }
 3698         splx(s);
 3699 }
 3700         
 3701 /*
 3702  * Schedule a peripheral driver to receive a ccb when its
 3703  * target device has space for more transactions.
 3704  */
 3705 void
 3706 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 3707 {
 3708         struct cam_ed *device;
 3709         union ccb *work_ccb;
 3710         int s;
 3711         int runq;
 3712 
 3713         GIANT_REQUIRED;
 3714 
 3715         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3716         device = perph->path->device;
 3717         s = splsoftcam();
 3718         if (periph_is_queued(perph)) {
 3719                 /* Simply reorder based on new priority */
 3720                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3721                           ("   change priority to %d\n", new_priority));
 3722                 if (new_priority < perph->pinfo.priority) {
 3723                         camq_change_priority(&device->drvq,
 3724                                              perph->pinfo.index,
 3725                                              new_priority);
 3726                 }
 3727                 runq = 0;
 3728         } else if (SIM_DEAD(perph->path->bus->sim)) {
 3729                 /* The SIM is gone so just call periph_start directly. */
 3730                 work_ccb = xpt_get_ccb(perph->path->device);
 3731                 splx(s);
 3732                 if (work_ccb == NULL)
 3733                         return; /* XXX */
 3734                 xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
 3735                 perph->pinfo.priority = new_priority;
 3736                 perph->periph_start(perph, work_ccb);
 3737                 return;
 3738         } else {
 3739                 /* New entry on the queue */
 3740                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3741                           ("   added periph to queue\n"));
 3742                 perph->pinfo.priority = new_priority;
 3743                 perph->pinfo.generation = ++device->drvq.generation;
 3744                 camq_insert(&device->drvq, &perph->pinfo);
 3745                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3746         }
 3747         splx(s);
 3748         if (runq != 0) {
 3749                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3750                           ("   calling xpt_run_devq\n"));
 3751                 xpt_run_dev_allocq(perph->path->bus);
 3752         }
 3753 }
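
/*
 * Example sketch: a peripheral driver asks for a ccb by scheduling
 * itself and receives the ccb later through its periph_start routine.
 * "fooschedule" and the priority value here are hypothetical.
 */
#if 0
static void
fooschedule(struct cam_periph *periph)
{
        /*
         * Queue ourselves on the device's driver queue; CAM will call
         * our periph_start routine with a ccb once the device has room.
         */
        xpt_schedule(periph, /*new_priority*/1);
}
#endif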
 3754 
 3755 
 3756 /*
 3757  * Schedule a device to run on a given queue.
 3758  * If the device was inserted as a new entry on the queue,
 3759  * return 1 meaning the device queue should be run. If we
 3760  * were already queued, implying someone else has already
 3761  * started the queue, return 0 so the caller doesn't attempt
 3762  * to run the queue.  Must be run at splsoftcam (or splcam,
 3763  * since that encompasses splsoftcam).
 3764  */
 3765 static int
 3766 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3767                  u_int32_t new_priority)
 3768 {
 3769         int retval;
 3770         u_int32_t old_priority;
 3771 
 3772         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3773 
 3774         old_priority = pinfo->priority;
 3775 
 3776         /*
 3777          * Are we already queued?
 3778          */
 3779         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3780                 /* Simply reorder based on new priority */
 3781                 if (new_priority < old_priority) {
 3782                         camq_change_priority(queue, pinfo->index,
 3783                                              new_priority);
 3784                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3785                                         ("changed priority to %d\n",
 3786                                          new_priority));
 3787                 }
 3788                 retval = 0;
 3789         } else {
 3790                 /* New entry on the queue */
 3791                 if (new_priority < old_priority)
 3792                         pinfo->priority = new_priority;
 3793 
 3794                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3795                                 ("Inserting onto queue\n"));
 3796                 pinfo->generation = ++queue->generation;
 3797                 camq_insert(queue, pinfo);
 3798                 retval = 1;
 3799         }
 3800         return (retval);
 3801 }
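
/*
 * The xpt_schedule_dev_allocq() and xpt_schedule_dev_sendq() wrappers
 * around this routine pair a return value of 1 with a matching call to
 * xpt_run_dev_allocq() or xpt_run_dev_sendq(); see xpt_schedule() above
 * and the XPT_SCSI_IO path in xpt_action() for the pattern.
 */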
 3802 
 3803 static void
 3804 xpt_run_dev_allocq(struct cam_eb *bus)
 3805 {
 3806         struct  cam_devq *devq;
 3807         int     s;
 3808 
 3809         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3810         devq = bus->sim->devq;
 3811 
 3812         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3813                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3814                          "openings == %d, active == %d\n",
 3815                          devq->alloc_queue.qfrozen_cnt,
 3816                          devq->alloc_queue.entries,
 3817                          devq->alloc_openings,
 3818                          devq->alloc_active));
 3819 
 3820         s = splsoftcam();
 3821         devq->alloc_queue.qfrozen_cnt++;
 3822         while ((devq->alloc_queue.entries > 0)
 3823             && (devq->alloc_openings > 0)
 3824             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
 3825                 struct  cam_ed_qinfo *qinfo;
 3826                 struct  cam_ed *device;
 3827                 union   ccb *work_ccb;
 3828                 struct  cam_periph *drv;
 3829                 struct  camq *drvq;
 3830                 
 3831                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3832                                                            CAMQ_HEAD);
 3833                 device = qinfo->device;
 3834 
 3835                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3836                                 ("running device %p\n", device));
 3837 
 3838                 drvq = &device->drvq;
 3839 
 3840 #ifdef CAMDEBUG
 3841                 if (drvq->entries <= 0) {
 3842                         panic("xpt_run_dev_allocq: "
 3843                               "Device on queue without any work to do");
 3844                 }
 3845 #endif
 3846                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3847                         devq->alloc_openings--;
 3848                         devq->alloc_active++;
 3849                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3850                         splx(s);
 3851                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3852                                       drv->pinfo.priority);
 3853                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3854                                         ("calling periph start\n"));
 3855                         drv->periph_start(drv, work_ccb);
 3856                 } else {
 3857                         /*
 3858                          * Malloc failure in alloc_ccb
 3859                          */
 3860                         /*
 3861                          * XXX add us to a list to be run from free_ccb
 3862                          * if we don't have any ccbs active on this
 3863                          * device queue, otherwise we may never get run
 3864                          * again.
 3865                          */
 3866                         break;
 3867                 }
 3868         
 3869                 /* Raise IPL for possible insertion and test at top of loop */
 3870                 s = splsoftcam();
 3871 
 3872                 if (drvq->entries > 0) {
 3873                         /* We have more work.  Attempt to reschedule */
 3874                         xpt_schedule_dev_allocq(bus, device);
 3875                 }
 3876         }
 3877         devq->alloc_queue.qfrozen_cnt--;
 3878         splx(s);
 3879 }
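
/*
 * Illustration (not part of the original file): the qfrozen_cnt++ /
 * "<= 1" / qfrozen_cnt-- bracket above is a reentrancy guard -- while
 * this loop holds the queue, any other freezer pushes the count past 1
 * and the loop drains out early.  A minimal sketch of the same pattern
 * in isolation, assuming struct camq and camq_remove() from
 * <cam/cam_queue.h>:
 */
static void
example_drain_guarded(struct camq *queue)
{
        queue->qfrozen_cnt++;
        while (queue->entries > 0 && queue->qfrozen_cnt <= 1) {
                /* camq_remove() pops the best entry and drops "entries". */
                (void)camq_remove(queue, CAMQ_HEAD);
        }
        queue->qfrozen_cnt--;
}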
 3880 
 3881 static void
 3882 xpt_run_dev_sendq(struct cam_eb *bus)
 3883 {
 3884         struct  cam_devq *devq;
 3885         int     s;
 3886 
 3887         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3888         
 3889         devq = bus->sim->devq;
 3890 
 3891         s = splcam();
 3892         devq->send_queue.qfrozen_cnt++;
 3893         splx(s);
 3894         s = splsoftcam();
 3895         while ((devq->send_queue.entries > 0)
 3896             && (devq->send_openings > 0)) {
 3897                 struct  cam_ed_qinfo *qinfo;
 3898                 struct  cam_ed *device;
 3899                 union ccb *work_ccb;
 3900                 struct  cam_sim *sim;
 3901                 int     ospl;
 3902 
 3903                 ospl = splcam();
 3904                 if (devq->send_queue.qfrozen_cnt > 1) {
 3905                         splx(ospl);
 3906                         break;
 3907                 }
 3908 
 3909                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3910                                                            CAMQ_HEAD);
 3911                 device = qinfo->device;
 3912 
 3913                 /*
 3914                  * If the device has been "frozen", don't attempt
 3915                  * to run it.
 3916                  */
 3917                 if (device->qfrozen_cnt > 0) {
 3918                         splx(ospl);
 3919                         continue;
 3920                 }
 3921 
 3922                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3923                                 ("running device %p\n", device));
 3924 
 3925                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3926                 if (work_ccb == NULL) {
 3927                         printf("device on run queue with no ccbs???\n");
 3928                         splx(ospl);
 3929                         continue;
 3930                 }
 3931 
 3932                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3933 
 3934                         if (num_highpower <= 0) {
 3935                                 /*
 3936                                  * We got a high power command, but we
 3937                                  * don't have any available slots.  Freeze
 3938                                  * the device queue until we have a slot
 3939                                  * available.
 3940                                  */
 3941                                 device->qfrozen_cnt++;
 3942                                 STAILQ_INSERT_TAIL(&highpowerq, 
 3943                                                    &work_ccb->ccb_h, 
 3944                                                    xpt_links.stqe);
 3945 
 3946                                 splx(ospl);
 3947                                 continue;
 3948                         } else {
 3949                                 /*
 3950                                  * Consume a high power slot while
 3951                                  * this ccb runs.
 3952                                  */
 3953                                 num_highpower--;
 3954                         }
 3955                 }
 3956                 devq->active_dev = device;
 3957                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3958 
 3959                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3960                 splx(ospl);
 3961 
 3962                 devq->send_openings--;
 3963                 devq->send_active++;            
 3964                 
 3965                 if (device->ccbq.queue.entries > 0)
 3966                         xpt_schedule_dev_sendq(bus, device);
 3967 
 3968                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
 3969                         /*
 3970                          * The client wants to freeze the queue
 3971                          * after this CCB is sent.
 3972                          */
 3973                         ospl = splcam();
 3974                         device->qfrozen_cnt++;
 3975                         splx(ospl);
 3976                 }
 3977                 
 3978                 splx(s);
 3979 
 3980                 /* In Target mode, the peripheral driver knows best... */
 3981                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3982                         if ((device->inq_flags & SID_CmdQue) != 0
 3983                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3984                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3985                         else
 3986                                 /*
 3987                                  * Clear this in case of a retried CCB that
 3988                                  * failed due to a rejected tag.
 3989                                  */
 3990                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3991                 }
 3992 
 3993                 /*
 3994                  * Device queues can be shared among multiple sim instances
 3995                  * that reside on different busses.  Use the SIM in the queue
 3996                  * CCB's path, rather than the one in the bus that was passed
 3997                  * into this function.
 3998                  */
 3999                 sim = work_ccb->ccb_h.path->bus->sim;
 4000                 (*(sim->sim_action))(sim, work_ccb);
 4001 
 4002                 ospl = splcam();
 4003                 devq->active_dev = NULL;
 4004                 splx(ospl);
 4005                 /* Raise IPL for possible insertion and test at top of loop */
 4006                 s = splsoftcam();
 4007         }
 4008         splx(s);
 4009         s = splcam();
 4010         devq->send_queue.qfrozen_cnt--;
 4011         splx(s);
 4012 }
 4013 
 4014 /*
 4015  * This function merges fields from the slave ccb into the master ccb, while
 4016  * keeping important fields in the master ccb constant.
 4017  */
 4018 void
 4019 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 4020 {
 4021         GIANT_REQUIRED;
 4022 
 4023         /*
 4024          * Pull fields that are valid for peripheral drivers to set
 4025          * into the master CCB along with the CCB "payload".
 4026          */
 4027         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 4028         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 4029         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 4030         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 4031         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 4032               sizeof(union ccb) - sizeof(struct ccb_hdr));
 4033 }
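
/*
 * Illustration (not part of the original file): the bcopy above starts
 * one ccb_hdr past the beginning of each union, so the entire CCB
 * "payload" follows the slave while the master keeps its path, pinfo
 * and queue linkage; only the four header fields copied explicitly
 * (retry_count, func_code, timeout, flags) track the slave.
 */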
 4034 
 4035 void
 4036 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 4037 {
 4038         GIANT_REQUIRED;
 4039 
 4040         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 4041         ccb_h->pinfo.priority = priority;
 4042         ccb_h->path = path;
 4043         ccb_h->path_id = path->bus->path_id;
 4044         if (path->target)
 4045                 ccb_h->target_id = path->target->target_id;
 4046         else
 4047                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 4048         if (path->device) {
 4049                 ccb_h->target_lun = path->device->lun_id;
 4050                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 4051         } else {
 4052                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 4053         }
 4054         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 4055         ccb_h->flags = 0;
 4056 }
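
/*
 * Illustration (not part of the original file): the usual
 * xpt_setup_ccb()/xpt_action() pairing, modelled on the XPT_PATH_INQ
 * sequence that xpt_bus_register() uses below.  The path is assumed to
 * be a valid cam_path held by the caller.
 */
static void
example_path_inq(struct cam_path *path)
{
        struct ccb_pathinq cpi;

        xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
        cpi.ccb_h.func_code = XPT_PATH_INQ;
        xpt_action((union ccb *)&cpi);
        if (cpi.ccb_h.status == CAM_REQ_CMP)
                printf("hba vendor: %.16s\n", cpi.hba_vid);
}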
 4057 
 4058 /* Path manipulation functions */
 4059 cam_status
 4060 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 4061                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 4062 {
 4063         struct     cam_path *path;
 4064         cam_status status;
 4065 
 4066         GIANT_REQUIRED;
 4067 
 4068         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 4069 
 4070         if (path == NULL) {
 4071                 status = CAM_RESRC_UNAVAIL;
 4072                 return(status);
 4073         }
 4074         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 4075         if (status != CAM_REQ_CMP) {
 4076                 free(path, M_CAMXPT);
 4077                 path = NULL;
 4078         }
 4079         *new_path_ptr = path;
 4080         return (status);
 4081 }
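
/*
 * Illustration (not part of the original file): typical
 * xpt_create_path()/xpt_free_path() usage; the 0/0/0 ids are
 * placeholders for a real bus/target/lun.
 */
static int
example_make_path(void)
{
        struct cam_path *path;

        if (xpt_create_path(&path, /*periph*/NULL, /*path_id*/0,
                            /*target_id*/0, /*lun_id*/0) != CAM_REQ_CMP)
                return (ENOMEM);
        /* ... use the path with xpt_setup_ccb(), xpt_async(), ... */
        xpt_free_path(path);
        return (0);
}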
 4082 
 4083 static cam_status
 4084 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 4085                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 4086 {
 4087         struct       cam_eb *bus;
 4088         struct       cam_et *target;
 4089         struct       cam_ed *device;
 4090         cam_status   status;
 4091         int          s;
 4092 
 4093         status = CAM_REQ_CMP;   /* Completed without error */
 4094         target = NULL;          /* Wildcarded */
 4095         device = NULL;          /* Wildcarded */
 4096 
 4097         /*
 4098          * We will potentially modify the EDT, so block interrupts
 4099          * that may attempt to create cam paths.
 4100          */
 4101         s = splcam();
 4102         bus = xpt_find_bus(path_id);
 4103         if (bus == NULL) {
 4104                 status = CAM_PATH_INVALID;
 4105         } else {
 4106                 target = xpt_find_target(bus, target_id);
 4107                 if (target == NULL) {
 4108                         /* Create one */
 4109                         struct cam_et *new_target;
 4110 
 4111                         new_target = xpt_alloc_target(bus, target_id);
 4112                         if (new_target == NULL) {
 4113                                 status = CAM_RESRC_UNAVAIL;
 4114                         } else {
 4115                                 target = new_target;
 4116                         }
 4117                 }
 4118                 if (target != NULL) {
 4119                         device = xpt_find_device(target, lun_id);
 4120                         if (device == NULL) {
 4121                                 /* Create one */
 4122                                 struct cam_ed *new_device;
 4123 
 4124                                 new_device = xpt_alloc_device(bus,
 4125                                                               target,
 4126                                                               lun_id);
 4127                                 if (new_device == NULL) {
 4128                                         status = CAM_RESRC_UNAVAIL;
 4129                                 } else {
 4130                                         device = new_device;
 4131                                 }
 4132                         }
 4133                 }
 4134         }
 4135         splx(s);
 4136 
 4137         /*
 4138          * Only touch the user's data if we are successful.
 4139          */
 4140         if (status == CAM_REQ_CMP) {
 4141                 new_path->periph = perph;
 4142                 new_path->bus = bus;
 4143                 new_path->target = target;
 4144                 new_path->device = device;
 4145                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 4146         } else {
 4147                 if (device != NULL)
 4148                         xpt_release_device(bus, target, device);
 4149                 if (target != NULL)
 4150                         xpt_release_target(bus, target);
 4151                 if (bus != NULL)
 4152                         xpt_release_bus(bus);
 4153         }
 4154         return (status);
 4155 }
 4156 
 4157 static void
 4158 xpt_release_path(struct cam_path *path)
 4159 {
 4160         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 4161         if (path->device != NULL) {
 4162                 xpt_release_device(path->bus, path->target, path->device);
 4163                 path->device = NULL;
 4164         }
 4165         if (path->target != NULL) {
 4166                 xpt_release_target(path->bus, path->target);
 4167                 path->target = NULL;
 4168         }
 4169         if (path->bus != NULL) {
 4170                 xpt_release_bus(path->bus);
 4171                 path->bus = NULL;
 4172         }
 4173 }
 4174 
 4175 void
 4176 xpt_free_path(struct cam_path *path)
 4177 {
 4178         GIANT_REQUIRED;
 4179 
 4180         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 4181         xpt_release_path(path);
 4182         free(path, M_CAMXPT);
 4183 }
 4184 
 4185 
 4186 /*
 4187  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 4188  * in path1, 2 for match with wildcards in path2.
 4189  */
 4190 int
 4191 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 4192 {
 4193         GIANT_REQUIRED;
 4194 
 4195         int retval = 0;
 4196 
 4197         if (path1->bus != path2->bus) {
 4198                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 4199                         retval = 1;
 4200                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 4201                         retval = 2;
 4202                 else
 4203                         return (-1);
 4204         }
 4205         if (path1->target != path2->target) {
 4206                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 4207                         if (retval == 0)
 4208                                 retval = 1;
 4209                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 4210                         retval = 2;
 4211                 else
 4212                         return (-1);
 4213         }
 4214         if (path1->device != path2->device) {
 4215                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 4216                         if (retval == 0)
 4217                                 retval = 1;
 4218                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 4219                         retval = 2;
 4220                 else
 4221                         return (-1);
 4222         }
 4223         return (retval);
 4224 }
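
/*
 * Illustration (not part of the original file): how a caller might use
 * xpt_path_comp()'s four return values, e.g. to decide whether a
 * wildcard path covers a specific device.
 */
static int
example_path_covers(struct cam_path *wildcard, struct cam_path *specific)
{
        switch (xpt_path_comp(wildcard, specific)) {
        case 0:                 /* exact match */
        case 1:                 /* match via wildcards in the first path */
                return (1);
        default:                /* 2: wildcards needed in the second path;
                                   -1: no match at all */
                return (0);
        }
}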
 4225 
 4226 void
 4227 xpt_print_path(struct cam_path *path)
 4228 {
 4229         GIANT_REQUIRED;
 4230 
 4231         if (path == NULL)
 4232                 printf("(nopath): ");
 4233         else {
 4234                 if (path->periph != NULL)
 4235                         printf("(%s%d:", path->periph->periph_name,
 4236                                path->periph->unit_number);
 4237                 else
 4238                         printf("(noperiph:");
 4239 
 4240                 if (path->bus != NULL)
 4241                         printf("%s%d:%d:", path->bus->sim->sim_name,
 4242                                path->bus->sim->unit_number,
 4243                                path->bus->sim->bus_id);
 4244                 else
 4245                         printf("nobus:");
 4246 
 4247                 if (path->target != NULL)
 4248                         printf("%d:", path->target->target_id);
 4249                 else
 4250                         printf("X:");
 4251 
 4252                 if (path->device != NULL)
 4253                         printf("%d): ", path->device->lun_id);
 4254                 else
 4255                         printf("X): ");
 4256         }
 4257 }
 4258 
 4259 int
 4260 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 4261 {
 4262         struct sbuf sb;
 4263 
 4264         GIANT_REQUIRED;
 4265 
 4266         sbuf_new(&sb, str, str_len, 0);
 4267 
 4268         if (path == NULL)
 4269                 sbuf_printf(&sb, "(nopath): ");
 4270         else {
 4271                 if (path->periph != NULL)
 4272                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 4273                                     path->periph->unit_number);
 4274                 else
 4275                         sbuf_printf(&sb, "(noperiph:");
 4276 
 4277                 if (path->bus != NULL)
 4278                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 4279                                     path->bus->sim->unit_number,
 4280                                     path->bus->sim->bus_id);
 4281                 else
 4282                         sbuf_printf(&sb, "nobus:");
 4283 
 4284                 if (path->target != NULL)
 4285                         sbuf_printf(&sb, "%d:", path->target->target_id);
 4286                 else
 4287                         sbuf_printf(&sb, "X:");
 4288 
 4289                 if (path->device != NULL)
 4290                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
 4291                 else
 4292                         sbuf_printf(&sb, "X): ");
 4293         }
 4294         sbuf_finish(&sb);
 4295 
 4296         return(sbuf_len(&sb));
 4297 }
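
/*
 * Illustration (not part of the original file): xpt_path_string() fills
 * a caller-supplied buffer with the same "(periph:bus:target:lun): "
 * prefix that xpt_print_path() writes to the console.
 */
static void
example_log_path(struct cam_path *path)
{
        char buf[64];

        xpt_path_string(path, buf, sizeof(buf));
        printf("%scommand timed out\n", buf);
}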
 4298 
 4299 path_id_t
 4300 xpt_path_path_id(struct cam_path *path)
 4301 {
 4302         GIANT_REQUIRED;
 4303 
 4304         return(path->bus->path_id);
 4305 }
 4306 
 4307 target_id_t
 4308 xpt_path_target_id(struct cam_path *path)
 4309 {
 4310         GIANT_REQUIRED;
 4311 
 4312         if (path->target != NULL)
 4313                 return (path->target->target_id);
 4314         else
 4315                 return (CAM_TARGET_WILDCARD);
 4316 }
 4317 
 4318 lun_id_t
 4319 xpt_path_lun_id(struct cam_path *path)
 4320 {
 4321         GIANT_REQUIRED;
 4322 
 4323         if (path->device != NULL)
 4324                 return (path->device->lun_id);
 4325         else
 4326                 return (CAM_LUN_WILDCARD);
 4327 }
 4328 
 4329 struct cam_sim *
 4330 xpt_path_sim(struct cam_path *path)
 4331 {
 4332         GIANT_REQUIRED;
 4333 
 4334         return (path->bus->sim);
 4335 }
 4336 
 4337 struct cam_periph*
 4338 xpt_path_periph(struct cam_path *path)
 4339 {
 4340         GIANT_REQUIRED;
 4341 
 4342         return (path->periph);
 4343 }
 4344 
 4345 /*
 4346  * Release a CAM control block for the caller.  Remit the cost of the structure
 4347  * to the device referenced by the path.  If this device had no 'credits'
 4348  * and peripheral drivers have registered async callbacks for this
 4349  * notification, call them now.
 4350  */
 4351 void
 4352 xpt_release_ccb(union ccb *free_ccb)
 4353 {
 4354         int      s;
 4355         struct   cam_path *path;
 4356         struct   cam_ed *device;
 4357         struct   cam_eb *bus;
 4358 
 4359         GIANT_REQUIRED;
 4360 
 4361         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 4362         path = free_ccb->ccb_h.path;
 4363         device = path->device;
 4364         bus = path->bus;
 4365         s = splsoftcam();
 4366         cam_ccbq_release_opening(&device->ccbq);
 4367         if (xpt_ccb_count > xpt_max_ccbs) {
 4368                 xpt_free_ccb(free_ccb);
 4369                 xpt_ccb_count--;
 4370         } else {
 4371                 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
 4372         }
 4373         if (bus->sim->devq == NULL) {
 4374                 splx(s);
 4375                 return;
 4376         }
 4377         bus->sim->devq->alloc_openings++;
 4378         bus->sim->devq->alloc_active--;
 4379         /* XXX Turn this into an inline function - xpt_run_device?? */
 4380         if ((device_is_alloc_queued(device) == 0)
 4381          && (device->drvq.entries > 0)) {
 4382                 xpt_schedule_dev_allocq(bus, device);
 4383         }
 4384         splx(s);
 4385         if (dev_allocq_is_runnable(bus->sim->devq))
 4386                 xpt_run_dev_allocq(bus);
 4387 }
 4388 
 4389 /* Functions accessed by SIM drivers */
 4390 
 4391 /*
 4392  * A sim structure, listing the SIM entry points and instance
 4393  * identification info, is passed to xpt_bus_register to hook the SIM
 4394  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 4395  * for this new bus and places it in the array of busses and assigns
 4396  * it a path_id.  The path_id may be influenced by "hard wiring"
 4397  * information specified by the user.  Once interrupt services are
 4398  * available, the bus will be probed.
 4399  */
 4400 int32_t
 4401 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
 4402 {
 4403         struct cam_eb *new_bus;
 4404         struct cam_eb *old_bus;
 4405         struct ccb_pathinq cpi;
 4406         int s;
 4407 
 4408         GIANT_REQUIRED;
 4409 
 4410         sim->bus_id = bus;
 4411         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 4412                                           M_CAMXPT, M_NOWAIT);
 4413         if (new_bus == NULL) {
 4414                 /* Couldn't satisfy request */
 4415                 return (CAM_RESRC_UNAVAIL);
 4416         }
 4417 
 4418         if (strcmp(sim->sim_name, "xpt") != 0) {
 4419 
 4420                 sim->path_id =
 4421                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 4422         }
 4423 
 4424         TAILQ_INIT(&new_bus->et_entries);
 4425         new_bus->path_id = sim->path_id;
 4426         new_bus->sim = sim;
 4427         timevalclear(&new_bus->last_reset);
 4428         new_bus->flags = 0;
 4429         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 4430         new_bus->generation = 0;
 4431         s = splcam();
 4432         old_bus = TAILQ_FIRST(&xpt_busses);
 4433         while (old_bus != NULL
 4434             && old_bus->path_id < new_bus->path_id)
 4435                 old_bus = TAILQ_NEXT(old_bus, links);
 4436         if (old_bus != NULL)
 4437                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 4438         else
 4439                 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
 4440         bus_generation++;
 4441         splx(s);
 4442 
 4443         /* Notify interested parties */
 4444         if (sim->path_id != CAM_XPT_PATH_ID) {
 4445                 struct cam_path path;
 4446 
 4447                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
 4448                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4449                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 4450                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 4451                 xpt_action((union ccb *)&cpi);
 4452                 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
 4453                 xpt_release_path(&path);
 4454         }
 4455         return (CAM_SUCCESS);
 4456 }
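
/*
 * Illustration (not part of the original file): a minimal sketch of a
 * SIM driver hooking itself into the XPT, assuming the FreeBSD 6-era
 * cam_sim_alloc() signature from <cam/cam_sim.h>.  mydriver_action,
 * mydriver_poll, softc and unit are hypothetical and would be supplied
 * by the real driver.
 */
static void     mydriver_action(struct cam_sim *sim, union ccb *ccb);
static void     mydriver_poll(struct cam_sim *sim);

static int
example_attach_sim(void *softc, int unit)
{
        struct cam_devq *devq;
        struct cam_sim *sim;

        if ((devq = cam_simq_alloc(/*max_sim_transactions*/32)) == NULL)
                return (ENOMEM);
        sim = cam_sim_alloc(mydriver_action, mydriver_poll, "mydriver",
                            softc, unit, /*max_dev_transactions*/1,
                            /*max_tagged_dev_transactions*/32, devq);
        if (sim == NULL) {
                cam_simq_free(devq);
                return (ENOMEM);
        }
        if (xpt_bus_register(sim, /*bus*/0) != CAM_SUCCESS) {
                /* free_devq == TRUE also releases the simq above. */
                cam_sim_free(sim, /*free_devq*/TRUE);
                return (ENXIO);
        }
        return (0);
}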
 4457 
 4458 int32_t
 4459 xpt_bus_deregister(path_id_t pathid)
 4460 {
 4461         struct cam_path bus_path;
 4462         struct cam_ed *device;
 4463         struct cam_ed_qinfo *qinfo;
 4464         struct cam_devq *devq;
 4465         struct cam_periph *periph;
 4466         struct cam_sim *ccbsim;
 4467         union ccb *work_ccb;
 4468         cam_status status;
 4469 
 4470         GIANT_REQUIRED;
 4471 
 4472         status = xpt_compile_path(&bus_path, NULL, pathid,
 4473                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4474         if (status != CAM_REQ_CMP)
 4475                 return (status);
 4476 
 4477         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 4478         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 4479 
 4480         /* The SIM may be gone, so use a dummy SIM for any stray operations. */
 4481         devq = bus_path.bus->sim->devq;
 4482         bus_path.bus->sim = &cam_dead_sim;
 4483 
 4484         /* Execute any pending operations now. */
 4485         while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 4486             CAMQ_HEAD)) != NULL ||
 4487             (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 4488             CAMQ_HEAD)) != NULL) {
 4489                 do {
 4490                         device = qinfo->device;
 4491                         work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 4492                         if (work_ccb != NULL) {
 4493                                 devq->active_dev = device;
 4494                                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 4495                                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 4496                                 ccbsim = work_ccb->ccb_h.path->bus->sim;
 4497                                 (*(ccbsim->sim_action))(ccbsim, work_ccb);
 4498                         }
 4499 
 4500                         periph = (struct cam_periph *)camq_remove(&device->drvq,
 4501                             CAMQ_HEAD);
 4502                         if (periph != NULL)
 4503                                 xpt_schedule(periph, periph->pinfo.priority);
 4504                 } while (work_ccb != NULL || periph != NULL);
 4505         }
 4506 
 4507         /* Make sure all completed CCBs are processed. */
 4508         while (!TAILQ_EMPTY(&cam_bioq)) {
 4509                 camisr(&cam_bioq);
 4510 
 4511                 /* Repeat the async's for the benefit of any new devices. */
 4512                 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 4513                 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 4514         }
 4515 
 4516         /* Release the reference count held while registered. */
 4517         xpt_release_bus(bus_path.bus);
 4518         xpt_release_path(&bus_path);
 4519 
 4520         /* Recheck for more completed CCBs. */
 4521         while (!TAILQ_EMPTY(&cam_bioq))
 4522                 camisr(&cam_bioq);
 4523 
 4524         return (CAM_REQ_CMP);
 4525 }
 4526 
 4527 static path_id_t
 4528 xptnextfreepathid(void)
 4529 {
 4530         struct cam_eb *bus;
 4531         path_id_t pathid;
 4532         const char *strval;
 4533 
 4534         pathid = 0;
 4535         bus = TAILQ_FIRST(&xpt_busses);
 4536 retry:
 4537         /* Find an unoccupied pathid */
 4538         while (bus != NULL
 4539             && bus->path_id <= pathid) {
 4540                 if (bus->path_id == pathid)
 4541                         pathid++;
 4542                 bus = TAILQ_NEXT(bus, links);
 4543         }
 4544 
 4545         /*
 4546          * Ensure that this pathid is not reserved for
 4547          * a bus that may be registered in the future.
 4548          */
 4549         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 4550                 ++pathid;
 4551                 /* Start the search over */
 4552                 goto retry;
 4553         }
 4554         return (pathid);
 4555 }
 4556 
 4557 static path_id_t
 4558 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 4559 {
 4560         path_id_t pathid;
 4561         int i, dunit, val;
 4562         char buf[32];
 4563         const char *dname;
 4564 
 4565         pathid = CAM_XPT_PATH_ID;
 4566         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 4567         i = 0;
 4568         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 4569                 if (strcmp(dname, "scbus")) {
 4570                         /* Avoid a bit of foot shooting. */
 4571                         continue;
 4572                 }
 4573                 if (dunit < 0)          /* unwired?! */
 4574                         continue;
 4575                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 4576                         if (sim_bus == val) {
 4577                                 pathid = dunit;
 4578                                 break;
 4579                         }
 4580                 } else if (sim_bus == 0) {
 4581                         /* Unspecified matches bus 0 */
 4582                         pathid = dunit;
 4583                         break;
 4584                 } else {
 4585                         printf("Ambiguous scbus configuration for %s%d "
 4586                                "bus %d, cannot wire down.  The kernel "
 4587                                "config entry for scbus%d should "
 4588                                "specify a controller bus.\n"
 4589                                "Scbus will be assigned dynamically.\n",
 4590                                sim_name, sim_unit, sim_bus, dunit);
 4591                         break;
 4592                 }
 4593         }
 4594 
 4595         if (pathid == CAM_XPT_PATH_ID)
 4596                 pathid = xptnextfreepathid();
 4597         return (pathid);
 4598 }
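
/*
 * Illustration (not part of the original file): the "scbus" wiring that
 * xptpathid() parses comes from the kernel config or /boot/device.hints,
 * e.g. (hypothetical controller name):
 *
 *      hint.scbus.0.at="ahc0"
 *      hint.scbus.0.bus="0"
 *
 * which pins path id 0 to bus 0 of ahc0; unwired SIMs fall through to
 * xptnextfreepathid() above.
 */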
 4599 
 4600 void
 4601 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 4602 {
 4603         struct cam_eb *bus;
 4604         struct cam_et *target, *next_target;
 4605         struct cam_ed *device, *next_device;
 4606         int s;
 4607 
 4608         GIANT_REQUIRED;
 4609 
 4610         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
 4611 
 4612         /*
 4613          * Most async events come from a CAM interrupt context.  In
 4614          * a few cases, the error recovery code at the peripheral layer,
 4615          * which may run from our SWI or a process context, may signal
 4616          * deferred events with a call to xpt_async. Ensure async
 4617          * notifications are serialized by blocking cam interrupts.
 4618          */
 4619         s = splcam();
 4620 
 4621         bus = path->bus;
 4622 
 4623         if (async_code == AC_BUS_RESET) { 
 4624                 int s;
 4625 
 4626                 s = splclock();
 4627                 /* Update our notion of when the last reset occurred */
 4628                 microtime(&bus->last_reset);
 4629                 splx(s);
 4630         }
 4631 
 4632         for (target = TAILQ_FIRST(&bus->et_entries);
 4633              target != NULL;
 4634              target = next_target) {
 4635 
 4636                 next_target = TAILQ_NEXT(target, links);
 4637 
 4638                 if (path->target != target
 4639                  && path->target->target_id != CAM_TARGET_WILDCARD
 4640                  && target->target_id != CAM_TARGET_WILDCARD)
 4641                         continue;
 4642 
 4643                 if (async_code == AC_SENT_BDR) {
 4644                         int s;
 4645 
 4646                         /* Update our notion of when the last reset occurred */
 4647                         s = splclock();
 4648                         microtime(&path->target->last_reset);
 4649                         splx(s);
 4650                 }
 4651 
 4652                 for (device = TAILQ_FIRST(&target->ed_entries);
 4653                      device != NULL;
 4654                      device = next_device) {
 4655 
 4656                         next_device = TAILQ_NEXT(device, links);
 4657 
 4658                         if (path->device != device 
 4659                          && path->device->lun_id != CAM_LUN_WILDCARD
 4660                          && device->lun_id != CAM_LUN_WILDCARD)
 4661                                 continue;
 4662 
 4663                         xpt_dev_async(async_code, bus, target,
 4664                                       device, async_arg);
 4665 
 4666                         xpt_async_bcast(&device->asyncs, async_code,
 4667                                         path, async_arg);
 4668                 }
 4669         }
 4670         
 4671         /*
 4672          * If this wasn't a fully wildcarded async, tell all
 4673          * clients that want all async events.
 4674          */
 4675         if (bus != xpt_periph->path->bus)
 4676                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 4677                                 path, async_arg);
 4678         splx(s);
 4679 }
 4680 
 4681 static void
 4682 xpt_async_bcast(struct async_list *async_head,
 4683                 u_int32_t async_code,
 4684                 struct cam_path *path, void *async_arg)
 4685 {
 4686         struct async_node *cur_entry;
 4687 
 4688         cur_entry = SLIST_FIRST(async_head);
 4689         while (cur_entry != NULL) {
 4690                 struct async_node *next_entry;
 4691                 /*
 4692                  * Grab the next list entry before we call the current
 4693                  * entry's callback.  This is because the callback function
 4694                  * can delete its async callback entry.
 4695                  */
 4696                 next_entry = SLIST_NEXT(cur_entry, links);
 4697                 if ((cur_entry->event_enable & async_code) != 0)
 4698                         cur_entry->callback(cur_entry->callback_arg,
 4699                                             async_code, path,
 4700                                             async_arg);
 4701                 cur_entry = next_entry;
 4702         }
 4703 }
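
/*
 * Illustration (not part of the original file): how a callback ends up
 * on an async_list that xpt_async_bcast() walks -- a driver registers
 * it with an XPT_SASYNC_CB action.  example_async_cb is hypothetical;
 * its prototype matches ac_callback_t from <cam/cam_ccb.h>.
 */
static void     example_async_cb(void *cbarg, u_int32_t code,
                                 struct cam_path *path, void *arg);

static void
example_register_async(struct cam_path *path, void *cbarg)
{
        struct ccb_setasync csa;

        xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = AC_LOST_DEVICE | AC_BUS_RESET;
        csa.callback = example_async_cb;
        csa.callback_arg = cbarg;
        xpt_action((union ccb *)&csa);
}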
 4704 
 4705 /*
 4706  * Handle any per-device event notifications that require action by the XPT.
 4707  */
 4708 static void
 4709 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
 4710               struct cam_ed *device, void *async_arg)
 4711 {
 4712         cam_status status;
 4713         struct cam_path newpath;
 4714 
 4715         /*
 4716          * We only need to handle events for real devices.
 4717          */
 4718         if (target->target_id == CAM_TARGET_WILDCARD
 4719          || device->lun_id == CAM_LUN_WILDCARD)
 4720                 return;
 4721 
 4722         /*
 4723          * We need our own path with wildcards expanded to
 4724          * handle certain types of events.
 4725          */
 4726         if ((async_code == AC_SENT_BDR)
 4727          || (async_code == AC_BUS_RESET)
 4728          || (async_code == AC_INQ_CHANGED))
 4729                 status = xpt_compile_path(&newpath, NULL,
 4730                                           bus->path_id,
 4731                                           target->target_id,
 4732                                           device->lun_id);
 4733         else
 4734                 status = CAM_REQ_CMP_ERR;
 4735 
 4736         if (status == CAM_REQ_CMP) {
 4737 
 4738                 /*
 4739                  * Allow transfer negotiation to occur in a
 4740                  * tag free environment.
 4741                  */
 4742                 if (async_code == AC_SENT_BDR
 4743                  || async_code == AC_BUS_RESET)
 4744                         xpt_toggle_tags(&newpath);
 4745 
 4746                 if (async_code == AC_INQ_CHANGED) {
 4747                         /*
 4748                          * We've sent a start unit command, or
 4749                          * something similar, to a device that
 4750                          * may have caused its inquiry data to
 4751                          * change.  So we re-scan the device to
 4752                          * refresh the inquiry data for it.
 4753                          */
 4754                         xpt_scan_lun(newpath.periph, &newpath,
 4755                                      CAM_EXPECT_INQ_CHANGE, NULL);
 4756                 }
 4757                 xpt_release_path(&newpath);
 4758         } else if (async_code == AC_LOST_DEVICE) {
 4759                 device->flags |= CAM_DEV_UNCONFIGURED;
 4760         } else if (async_code == AC_TRANSFER_NEG) {
 4761                 struct ccb_trans_settings *settings;
 4762 
 4763                 settings = (struct ccb_trans_settings *)async_arg;
 4764                 xpt_set_transfer_settings(settings, device,
 4765                                           /*async_update*/TRUE);
 4766         }
 4767 }
 4768 
 4769 u_int32_t
 4770 xpt_freeze_devq(struct cam_path *path, u_int count)
 4771 {
 4772         int s;
 4773         struct ccb_hdr *ccbh;
 4774 
 4775         GIANT_REQUIRED;
 4776 
 4777         s = splcam();
 4778         path->device->qfrozen_cnt += count;
 4779 
 4780         /*
 4781          * Mark the last CCB in the queue as needing
 4782          * to be requeued if the driver hasn't
 4783          * changed its state yet.  This fixes a race
 4784          * where a ccb is just about to be queued to
 4785          * a controller driver when its interrupt routine
 4786          * freezes the queue.  To completely close the
 4787          * hole, controller drivers must check to see
 4788          * if a ccb's status is still CAM_REQ_INPROG
 4789          * under spl protection just before they queue
 4790          * the CCB.  See ahc_action/ahc_freeze_devq for
 4791          * an example.
 4792          */
 4793         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
 4794         if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4795                 ccbh->status = CAM_REQUEUE_REQ;
 4796         splx(s);
 4797         return (path->device->qfrozen_cnt);
 4798 }
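
/*
 * Illustration (not part of the original file): the controller-side
 * half of the race fix described above.  Before committing a CCB to
 * hardware, a SIM re-checks the status under splcam(); a concurrent
 * xpt_freeze_devq() may have flipped it to CAM_REQUEUE_REQ.  A sketch
 * only -- see ahc_action()/ahc_freeze_devq() for the real thing.
 */
static int
example_sim_queue_ccb(union ccb *ccb)
{
        int s;

        s = splcam();
        if (ccb->ccb_h.status != CAM_REQ_INPROG) {
                /* Frozen out from under us; hand the CCB back. */
                splx(s);
                xpt_done(ccb);
                return (0);
        }
        /* ... queue the CCB to the controller here ... */
        splx(s);
        return (1);
}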
 4799 
 4800 u_int32_t
 4801 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4802 {
 4803         GIANT_REQUIRED;
 4804 
 4805         sim->devq->send_queue.qfrozen_cnt += count;
 4806         if (sim->devq->active_dev != NULL) {
 4807                 struct ccb_hdr *ccbh;
 4808                 
 4809                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
 4810                                   ccb_hdr_tailq);
 4811                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4812                         ccbh->status = CAM_REQUEUE_REQ;
 4813         }
 4814         return (sim->devq->send_queue.qfrozen_cnt);
 4815 }
 4816 
 4817 static void
 4818 xpt_release_devq_timeout(void *arg)
 4819 {
 4820         struct cam_ed *device;
 4821 
 4822         device = (struct cam_ed *)arg;
 4823 
 4824         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
 4825 }
 4826 
 4827 void
 4828 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4829 {
 4830         GIANT_REQUIRED;
 4831 
 4832         xpt_release_devq_device(path->device, count, run_queue);
 4833 }
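
/*
 * Illustration (not part of the original file): the usual
 * freeze/release pairing during peripheral-level error recovery --
 * freeze before issuing recovery commands, release (running the queue)
 * once they complete.
 */
static void
example_recovery(struct cam_path *path)
{
        xpt_freeze_devq(path, /*count*/1);
        /* ... issue and wait for recovery CCBs here ... */
        xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
}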
 4834 
 4835 static void
 4836 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
 4837 {
 4838         int     rundevq;
 4839         int     s0, s1;
 4840 
 4841         rundevq = 0;
 4842         s0 = splsoftcam();
 4843         s1 = splcam();
 4844         if (dev->qfrozen_cnt > 0) {
 4845 
 4846                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
 4847                 dev->qfrozen_cnt -= count;
 4848                 if (dev->qfrozen_cnt == 0) {
 4849 
 4850                         /*
 4851                          * No longer need to wait for a successful
 4852                          * command completion.
 4853                          */
 4854                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4855 
 4856                         /*
 4857                          * Remove any timeouts that might be scheduled
 4858                          * to release this queue.
 4859                          */
 4860                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4861                                 untimeout(xpt_release_devq_timeout, dev,
 4862                                           dev->c_handle);
 4863                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4864                         }
 4865 
 4866                         /*
 4867                          * Now that we are unfrozen, schedule the
 4868                          * device so any pending transactions are
 4869                          * run.
 4870                          */
 4871                         if ((dev->ccbq.queue.entries > 0)
 4872                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4873                          && (run_queue != 0)) {
 4874                                 rundevq = 1;
 4875                         }
 4876                 }
 4877         }
 4878         splx(s1);
 4879         if (rundevq != 0)
 4880                 xpt_run_dev_sendq(dev->target->bus);
 4881         splx(s0);
 4882 }
 4883 
 4884 void
 4885 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4886 {
 4887         int     s;
 4888         struct  camq *sendq;
 4889 
 4890         GIANT_REQUIRED;
 4891 
 4892         sendq = &(sim->devq->send_queue);
 4893         s = splcam();
 4894         if (sendq->qfrozen_cnt > 0) {
 4895 
 4896                 sendq->qfrozen_cnt--;
 4897                 if (sendq->qfrozen_cnt == 0) {
 4898                         struct cam_eb *bus;
 4899 
 4900                         /*
 4901                          * If there is a timeout scheduled to release this
 4902                          * sim queue, remove it.  The queue frozen count is
 4903                          * already at 0.
 4904                          */
 4905                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4906                                 untimeout(xpt_release_simq_timeout, sim,
 4907                                           sim->c_handle);
 4908                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4909                         }
 4910                         bus = xpt_find_bus(sim->path_id);
 4911                         splx(s);
 4912 
 4913                         if (run_queue) {
 4914                                 /*
 4915                                  * Now that we are unfrozen, run the send queue.
 4916                                  */
 4917                                 xpt_run_dev_sendq(bus);
 4918                         }
 4919                         xpt_release_bus(bus);
 4920                 } else
 4921                         splx(s);
 4922         } else
 4923                 splx(s);
 4924 }
 4925 
 4926 static void
 4927 xpt_release_simq_timeout(void *arg)
 4928 {
 4929         struct cam_sim *sim;
 4930 
 4931         sim = (struct cam_sim *)arg;
 4932         xpt_release_simq(sim, /* run_queue */ TRUE);
 4933 }
 4934 
 4935 void
 4936 xpt_done(union ccb *done_ccb)
 4937 {
 4938         int s;
 4939 
 4940         s = splcam();
 4941 
 4942         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4943         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4944                 /*
 4945                  * Queue up any of the "non-immediate" type of ccbs
 4946                  * for handling by our SWI handler.
 4947                  */
 4948                 switch (done_ccb->ccb_h.path->periph->type) {
 4949                 case CAM_PERIPH_BIO:
 4950                         mtx_lock(&cam_bioq_lock);
 4951                         TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
 4952                                           sim_links.tqe);
 4953                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4954                         mtx_unlock(&cam_bioq_lock);
 4955                         swi_sched(cambio_ih, 0);
 4956                         break;
 4957                 default:
 4958                         panic("unknown periph type %d",
 4959                             done_ccb->ccb_h.path->periph->type);
 4960                 }
 4961         }
 4962         splx(s);
 4963 }
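
/*
 * Illustration (not part of the original file): a SIM completes a
 * queued CCB by filling in the status and handing it back via
 * xpt_done(); the final peripheral callback then runs from the camisr
 * SWI scheduled above.
 */
static void
example_sim_complete(union ccb *ccb)
{
        ccb->ccb_h.status = CAM_REQ_CMP;
        xpt_done(ccb);
}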
 4964 
 4965 union ccb *
 4966 xpt_alloc_ccb()
 4967 {
 4968         union ccb *new_ccb;
 4969 
 4970         GIANT_REQUIRED;
 4971 
 4972         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_WAITOK);
 4973         return (new_ccb);
 4974 }
 4975 
 4976 union ccb *
 4977 xpt_alloc_ccb_nowait()
 4978 {
 4979         union ccb *new_ccb;
 4980 
 4981         GIANT_REQUIRED;
 4982 
 4983         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_NOWAIT);
 4984         return (new_ccb);
 4985 }
 4986 
 4987 void
 4988 xpt_free_ccb(union ccb *free_ccb)
 4989 {
 4990         free(free_ccb, M_CAMXPT);
 4991 }
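
/*
 * Illustration (not part of the original file): the
 * xpt_alloc_ccb()/xpt_free_ccb() pairing, as the bus scan code below
 * uses it -- allocate, aim at a path, act, free.  XPT_PATH_INQ is an
 * immediate CCB, so it completes within xpt_action().
 */
static void
example_oneshot_ccb(struct cam_path *path)
{
        union ccb *ccb;

        ccb = xpt_alloc_ccb();          /* M_WAITOK: cannot fail */
        xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/5);
        ccb->ccb_h.func_code = XPT_PATH_INQ;
        xpt_action(ccb);
        xpt_free_ccb(ccb);
}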
 4992 
 4993 
 4994 
 4995 /* Private XPT functions */
 4996 
 4997 /*
 4998  * Get a CAM control block for the caller. Charge the structure to the device
 4999  * referenced by the path.  If this device has no 'credits' then the
 5000  * device already has the maximum number of outstanding operations under way
 5001  * and we return NULL. If we don't have sufficient resources to allocate more
 5002  * ccbs, we also return NULL.
 5003  */
 5004 static union ccb *
 5005 xpt_get_ccb(struct cam_ed *device)
 5006 {
 5007         union ccb *new_ccb;
 5008         int s;
 5009 
 5010         s = splsoftcam();
 5011         if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
 5012                 new_ccb = xpt_alloc_ccb_nowait();
 5013                 if (new_ccb == NULL) {
 5014                         splx(s);
 5015                         return (NULL);
 5016                 }
 5017                 callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 5018                 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
 5019                                   xpt_links.sle);
 5020                 xpt_ccb_count++;
 5021         }
 5022         cam_ccbq_take_opening(&device->ccbq);
 5023         SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
 5024         splx(s);
 5025         return (new_ccb);
 5026 }
 5027 
 5028 static void
 5029 xpt_release_bus(struct cam_eb *bus)
 5030 {
 5031         int s;
 5032 
 5033         s = splcam();
 5034         if ((--bus->refcount == 0)
 5035          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
 5036                 TAILQ_REMOVE(&xpt_busses, bus, links);
 5037                 bus_generation++;
 5038                 splx(s);
 5039                 free(bus, M_CAMXPT);
 5040         } else
 5041                 splx(s);
 5042 }
 5043 
 5044 static struct cam_et *
 5045 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 5046 {
 5047         struct cam_et *target;
 5048 
 5049         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
 5050         if (target != NULL) {
 5051                 struct cam_et *cur_target;
 5052 
 5053                 TAILQ_INIT(&target->ed_entries);
 5054                 target->bus = bus;
 5055                 target->target_id = target_id;
 5056                 target->refcount = 1;
 5057                 target->generation = 0;
 5058                 timevalclear(&target->last_reset);
 5059                 /*
 5060                  * Hold a reference to our parent bus so it
 5061                  * will not go away before we do.
 5062                  */
 5063                 bus->refcount++;
 5064 
 5065                 /* Insertion sort into our bus's target list */
 5066                 cur_target = TAILQ_FIRST(&bus->et_entries);
 5067                 while (cur_target != NULL && cur_target->target_id < target_id)
 5068                         cur_target = TAILQ_NEXT(cur_target, links);
 5069 
 5070                 if (cur_target != NULL) {
 5071                         TAILQ_INSERT_BEFORE(cur_target, target, links);
 5072                 } else {
 5073                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 5074                 }
 5075                 bus->generation++;
 5076         }
 5077         return (target);
 5078 }
 5079 
 5080 static void
 5081 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
 5082 {
 5083         int s;
 5084 
 5085         s = splcam();
 5086         if ((--target->refcount == 0)
 5087          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
 5088                 TAILQ_REMOVE(&bus->et_entries, target, links);
 5089                 bus->generation++;
 5090                 splx(s);
 5091                 free(target, M_CAMXPT);
 5092                 xpt_release_bus(bus);
 5093         } else
 5094                 splx(s);
 5095 }
 5096 
 5097 static struct cam_ed *
 5098 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 5099 {
 5100 #ifdef CAM_NEW_TRAN_CODE
 5101         struct     cam_path path;
 5102 #endif /* CAM_NEW_TRAN_CODE */
 5103         struct     cam_ed *device;
 5104         struct     cam_devq *devq;
 5105         cam_status status;
 5106 
 5107         if (SIM_DEAD(bus->sim))
 5108                 return (NULL);
 5109 
 5110         /* Make space for us in the device queue on our bus */
 5111         devq = bus->sim->devq;
 5112         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 5113 
 5114         if (status != CAM_REQ_CMP) {
 5115                 device = NULL;
 5116         } else {
 5117                 device = (struct cam_ed *)malloc(sizeof(*device),
 5118                                                  M_CAMXPT, M_NOWAIT);
 5119         }
 5120 
 5121         if (device != NULL) {
 5122                 struct cam_ed *cur_device;
 5123 
 5124                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 5125                 device->alloc_ccb_entry.device = device;
 5126                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
 5127                 device->send_ccb_entry.device = device;
 5128                 device->target = target;
 5129                 device->lun_id = lun_id;
 5130                 /* Initialize our queues */
 5131                 if (camq_init(&device->drvq, 0) != 0) {
 5132                         free(device, M_CAMXPT);
 5133                         return (NULL);
 5134                 }
 5135                 if (cam_ccbq_init(&device->ccbq,
 5136                                   bus->sim->max_dev_openings) != 0) {
 5137                         camq_fini(&device->drvq);
 5138                         free(device, M_CAMXPT);
 5139                         return (NULL);
 5140                 }
 5141                 SLIST_INIT(&device->asyncs);
 5142                 SLIST_INIT(&device->periphs);
 5143                 device->generation = 0;
 5144                 device->owner = NULL;
 5145                 /*
 5146                  * Take the default quirk entry until we have inquiry
 5147                  * data and can determine a better quirk to use.
 5148                  */
 5149                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
 5150                 bzero(&device->inq_data, sizeof(device->inq_data));
 5151                 device->inq_flags = 0;
 5152                 device->queue_flags = 0;
 5153                 device->serial_num = NULL;
 5154                 device->serial_num_len = 0;
 5155                 device->qfrozen_cnt = 0;
 5156                 device->flags = CAM_DEV_UNCONFIGURED;
 5157                 device->tag_delay_count = 0;
 5158                 device->tag_saved_openings = 0;
 5159                 device->refcount = 1;
 5160                 callout_handle_init(&device->c_handle);
 5161 
 5162                 /*
 5163                  * Hold a reference to our parent target so it
 5164                  * will not go away before we do.
 5165                  */
 5166                 target->refcount++;
 5167 
 5168                 /*
 5169                  * XXX should be limited by number of CCBs this bus can
 5170                  * do.
 5171                  */
 5172                 xpt_max_ccbs += device->ccbq.devq_openings;
 5173                 /* Insertion sort into our target's device list */
 5174                 cur_device = TAILQ_FIRST(&target->ed_entries);
 5175                 while (cur_device != NULL && cur_device->lun_id < lun_id)
 5176                         cur_device = TAILQ_NEXT(cur_device, links);
 5177                 if (cur_device != NULL) {
 5178                         TAILQ_INSERT_BEFORE(cur_device, device, links);
 5179                 } else {
 5180                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 5181                 }
 5182                 target->generation++;
 5183 #ifdef CAM_NEW_TRAN_CODE
 5184                 if (lun_id != CAM_LUN_WILDCARD) {
 5185                         xpt_compile_path(&path,
 5186                                          NULL,
 5187                                          bus->path_id,
 5188                                          target->target_id,
 5189                                          lun_id);
 5190                         xpt_devise_transport(&path);
 5191                         xpt_release_path(&path);
 5192                 }
 5193 #endif /* CAM_NEW_TRAN_CODE */
 5194         }
 5195         return (device);
 5196 }
 5197 
 5198 static void
 5199 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
 5200                    struct cam_ed *device)
 5201 {
 5202         int s;
 5203 
 5204         s = splcam();
 5205         if ((--device->refcount == 0)
 5206          && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
 5207                 struct cam_devq *devq;
 5208 
 5209                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 5210                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 5211                         panic("Removing device while still queued for ccbs");
 5212 
 5213                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 5214                                 untimeout(xpt_release_devq_timeout, device,
 5215                                           device->c_handle);
 5216 
 5217                 TAILQ_REMOVE(&target->ed_entries, device,links);
 5218                 target->generation++;
 5219                 xpt_max_ccbs -= device->ccbq.devq_openings;
 5220                 if (!SIM_DEAD(bus->sim)) {
 5221                         /* Release our slot in the devq */
 5222                         devq = bus->sim->devq;
 5223                         cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 5224                 }
 5225                 splx(s);
 5226                 camq_fini(&device->drvq);
 5227                 camq_fini(&device->ccbq.queue);
 5228                 free(device, M_CAMXPT);
 5229                 xpt_release_target(bus, target);
 5230         } else
 5231                 splx(s);
 5232 }
 5233 
 5234 static u_int32_t
 5235 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 5236 {
 5237         int     s;
 5238         int     diff;
 5239         int     result;
 5240         struct  cam_ed *dev;
 5241 
 5242         dev = path->device;
 5243         s = splsoftcam();
 5244 
 5245         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 5246         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 5247         if (result == CAM_REQ_CMP && (diff < 0)) {
 5248                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 5249         }
 5250         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 5251          || (dev->inq_flags & SID_CmdQue) != 0)
 5252                 dev->tag_saved_openings = newopenings;
 5253         /* Adjust the global limit */
 5254         xpt_max_ccbs += diff;
 5255         splx(s);
 5256         return (result);
 5257 }
 5258 
 5259 static struct cam_eb *
 5260 xpt_find_bus(path_id_t path_id)
 5261 {
 5262         struct cam_eb *bus;
 5263 
 5264         for (bus = TAILQ_FIRST(&xpt_busses);
 5265              bus != NULL;
 5266              bus = TAILQ_NEXT(bus, links)) {
 5267                 if (bus->path_id == path_id) {
 5268                         bus->refcount++;
 5269                         break;
 5270                 }
 5271         }
 5272         return (bus);
 5273 }
 5274 
 5275 static struct cam_et *
 5276 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 5277 {
 5278         struct cam_et *target;
 5279 
 5280         for (target = TAILQ_FIRST(&bus->et_entries);
 5281              target != NULL;
 5282              target = TAILQ_NEXT(target, links)) {
 5283                 if (target->target_id == target_id) {
 5284                         target->refcount++;
 5285                         break;
 5286                 }
 5287         }
 5288         return (target);
 5289 }
 5290 
 5291 static struct cam_ed *
 5292 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 5293 {
 5294         struct cam_ed *device;
 5295 
 5296         for (device = TAILQ_FIRST(&target->ed_entries);
 5297              device != NULL;
 5298              device = TAILQ_NEXT(device, links)) {
 5299                 if (device->lun_id == lun_id) {
 5300                         device->refcount++;
 5301                         break;
 5302                 }
 5303         }
 5304         return (device);
 5305 }
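      /*
       * Usage sketch (hypothetical caller, not part of the original
       * source): each xpt_find_* routine above takes a reference on
       * the object it returns, so a successful lookup must be paired
       * with the matching release, e.g.:
       *
       *	struct cam_eb *bus;
       *
       *	if ((bus = xpt_find_bus(path_id)) != NULL) {
       *		... use the bus ...
       *		xpt_release_bus(bus);
       *	}
       */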
 5306 
 5307 typedef struct {
 5308         union   ccb *request_ccb;
 5309         struct  ccb_pathinq *cpi;
 5310         int     pending_count;
 5311 } xpt_scan_bus_info;
 5312 
 5313 /*
 5314  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
 5315  * As the scan progresses, xpt_scan_bus is used as the
 5316  * completion callback for each XPT_SCAN_LUN request.
 5317  */
 5318 static void
 5319 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
 5320 {
 5321         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5322                   ("xpt_scan_bus\n"));
 5323         switch (request_ccb->ccb_h.func_code) {
 5324         case XPT_SCAN_BUS:
 5325         {
 5326                 xpt_scan_bus_info *scan_info;
 5327                 union   ccb *work_ccb;
 5328                 struct  cam_path *path;
 5329                 u_int   i;
 5330                 u_int   max_target;
 5331                 u_int   initiator_id;
 5332 
 5333                 /* Find out the characteristics of the bus */
 5334                 work_ccb = xpt_alloc_ccb();
 5335                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
 5336                               request_ccb->ccb_h.pinfo.priority);
 5337                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 5338                 xpt_action(work_ccb);
 5339                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
 5340                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
 5341                         xpt_free_ccb(work_ccb);
 5342                         xpt_done(request_ccb);
 5343                         return;
 5344                 }
 5345 
 5346                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5347                         /*
 5348                          * Can't scan the bus on an adapter that
 5349                          * cannot perform the initiator role.
 5350                          */
 5351                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5352                         xpt_free_ccb(work_ccb);
 5353                         xpt_done(request_ccb);
 5354                         return;
 5355                 }
 5356 
 5357                 /* Save some state for use while we probe for devices */
 5358                 scan_info = (xpt_scan_bus_info *)
 5359                     malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
 5360                 scan_info->request_ccb = request_ccb;
 5361                 scan_info->cpi = &work_ccb->cpi;
 5362 
 5363                 /* Cache on our stack so we can work asynchronously */
 5364                 max_target = scan_info->cpi->max_target;
 5365                 initiator_id = scan_info->cpi->initiator_id;
 5366 
 5367                 /*
 5368                  * Don't count the initiator if its ID falls
 5369                  * within the addressable target range.
 5370                  */
 5371                 scan_info->pending_count = max_target + 1;
 5372                 if (initiator_id <= max_target)
 5373                         scan_info->pending_count--;
 5374 
 5375                 for (i = 0; i <= max_target; i++) {
 5376                         cam_status status;
 5377                         if (i == initiator_id)
 5378                                 continue;
 5379 
 5380                         status = xpt_create_path(&path, xpt_periph,
 5381                                                  request_ccb->ccb_h.path_id,
 5382                                                  i, 0);
 5383                         if (status != CAM_REQ_CMP) {
 5384                                 printf("xpt_scan_bus: xpt_create_path failed"
 5385                                        " with status %#x, bus scan halted\n",
 5386                                        status);
 5387                                 break;
 5388                         }
 5389                         work_ccb = xpt_alloc_ccb();
 5390                         xpt_setup_ccb(&work_ccb->ccb_h, path,
 5391                                       request_ccb->ccb_h.pinfo.priority);
 5392                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5393                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5394                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5395                         work_ccb->crcn.flags = request_ccb->crcn.flags;
 5396                         xpt_action(work_ccb);
 5397                 }
 5398                 break;
 5399         }
 5400         case XPT_SCAN_LUN:
 5401         {
 5402                 xpt_scan_bus_info *scan_info;
 5403                 path_id_t path_id;
 5404                 target_id_t target_id;
 5405                 lun_id_t lun_id;
 5406 
 5407                 /* Reuse the same CCB to query if a device was really found */
 5408                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
 5409                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
 5410                               request_ccb->ccb_h.pinfo.priority);
 5411                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 5412 
 5413                 path_id = request_ccb->ccb_h.path_id;
 5414                 target_id = request_ccb->ccb_h.target_id;
 5415                 lun_id = request_ccb->ccb_h.target_lun;
 5416                 xpt_action(request_ccb);
 5417 
 5418                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
 5419                         struct cam_ed *device;
 5420                         struct cam_et *target;
 5421                         int s, phl;
 5422 
 5423                         /*
 5424                          * If we already probed lun 0 successfully, or
 5425                          * we have additional configured luns on this
 5426                          * target that might have "gone away", go on to
 5427                          * the next lun.
 5428                          */
 5429                         target = request_ccb->ccb_h.path->target;
 5430                         /*
 5431                          * We may touch devices that we don't
 5432                          * hold references to, so ensure they
 5433                          * don't disappear out from under us.
 5434                          * The target above is referenced by the
 5435                          * path in the request ccb.
 5436                          */
 5437                         phl = 0;
 5438                         s = splcam();
 5439                         device = TAILQ_FIRST(&target->ed_entries);
 5440                         if (device != NULL) {
 5441                                 phl = CAN_SRCH_HI_SPARSE(device);
 5442                                 if (device->lun_id == 0)
 5443                                         device = TAILQ_NEXT(device, links);
 5444                         }
 5445                         splx(s);
 5446                         if ((lun_id != 0) || (device != NULL)) {
 5447                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
 5448                                         lun_id++;
 5449                         }
 5450                 } else {
 5451                         struct cam_ed *device;
 5452                         
 5453                         device = request_ccb->ccb_h.path->device;
 5454 
 5455                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
 5456                                 /* Try the next lun */
 5457                                 if (lun_id < (CAM_SCSI2_MAXLUN-1)
 5458                                   || CAN_SRCH_HI_DENSE(device))
 5459                                         lun_id++;
 5460                         }
 5461                 }
 5462 
 5463                 xpt_free_path(request_ccb->ccb_h.path);
 5464 
 5465                 /* Check Bounds */
 5466                 if ((lun_id == request_ccb->ccb_h.target_lun)
 5467                  || lun_id > scan_info->cpi->max_lun) {
 5468                         /* We're done */
 5469 
 5470                         xpt_free_ccb(request_ccb);
 5471                         scan_info->pending_count--;
 5472                         if (scan_info->pending_count == 0) {
 5473                                 xpt_free_ccb((union ccb *)scan_info->cpi);
 5474                                 request_ccb = scan_info->request_ccb;
 5475                                 free(scan_info, M_TEMP);
 5476                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
 5477                                 xpt_done(request_ccb);
 5478                         }
 5479                 } else {
 5480                         /* Try the next device */
 5481                         struct cam_path *path;
 5482                         cam_status status;
 5483 
 5484                         status = xpt_create_path(&path, xpt_periph,
 5485                                                  path_id, target_id, lun_id);
 5486                         if (status != CAM_REQ_CMP) {
 5487                                 printf("xpt_scan_bus: xpt_create_path failed "
 5488                                        "with status %#x, halting LUN scan\n",
 5489                                        status);
 5490                                 xpt_free_ccb(request_ccb);
 5491                                 scan_info->pending_count--;
 5492                                 if (scan_info->pending_count == 0) {
 5493                                         xpt_free_ccb(
 5494                                                 (union ccb *)scan_info->cpi);
 5495                                         request_ccb = scan_info->request_ccb;
 5496                                         free(scan_info, M_TEMP);
 5497                                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5498                                         xpt_done(request_ccb);
 5499                                 }
 5500                                 break;
 5501                         }
 5502                         xpt_setup_ccb(&request_ccb->ccb_h, path,
 5503                                       request_ccb->ccb_h.pinfo.priority);
 5504                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5505                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5506                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5507                         request_ccb->crcn.flags =
 5508                                 scan_info->request_ccb->crcn.flags;
 5509                         xpt_action(request_ccb);
 5510                 }
 5511                 break;
 5512         }
 5513         default:
 5514                 break;
 5515         }
 5516 }
 5517 
 5518 typedef enum {
 5519         PROBE_TUR,
 5520         PROBE_INQUIRY,
 5521         PROBE_FULL_INQUIRY,
 5522         PROBE_MODE_SENSE,
 5523         PROBE_SERIAL_NUM,
 5524         PROBE_TUR_FOR_NEGOTIATION
 5525 } probe_action;
 5526 
 5527 typedef enum {
 5528         PROBE_INQUIRY_CKSUM     = 0x01,
 5529         PROBE_SERIAL_CKSUM      = 0x02,
 5530         PROBE_NO_ANNOUNCE       = 0x04
 5531 } probe_flags;
 5532 
 5533 typedef struct {
 5534         TAILQ_HEAD(, ccb_hdr) request_ccbs;
 5535         probe_action    action;
 5536         union ccb       saved_ccb;
 5537         probe_flags     flags;
 5538         MD5_CTX         context;
 5539         u_int8_t        digest[16];
 5540 } probe_softc;
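      /*
       * Annotated sketch of the probe state machine driven by
       * probestart()/probedone() below (not part of the original
       * source): probeschedule() picks the entry point -- PROBE_TUR
       * for a previously configured lun 0, PROBE_INQUIRY otherwise --
       * and probedone() then advances softc->action roughly as
       *
       *	PROBE_TUR -> PROBE_INQUIRY [-> PROBE_FULL_INQUIRY]
       *	  -> PROBE_MODE_SENSE (only if the device reports SID_CmdQue)
       *	  -> PROBE_SERIAL_NUM [-> PROBE_TUR_FOR_NEGOTIATION]
       *
       * rescheduling the periph at each step.
       */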
 5541 
 5542 static void
 5543 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
 5544              cam_flags flags, union ccb *request_ccb)
 5545 {
 5546         struct ccb_pathinq cpi;
 5547         cam_status status;
 5548         struct cam_path *new_path;
 5549         struct cam_periph *old_periph;
 5550         int s;
 5551         
 5552         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5553                   ("xpt_scan_lun\n"));
 5554         
 5555         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 5556         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5557         xpt_action((union ccb *)&cpi);
 5558 
 5559         if (cpi.ccb_h.status != CAM_REQ_CMP) {
 5560                 if (request_ccb != NULL) {
 5561                         request_ccb->ccb_h.status = cpi.ccb_h.status;
 5562                         xpt_done(request_ccb);
 5563                 }
 5564                 return;
 5565         }
 5566 
 5567         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5568                 /*
 5569                  * Can't scan the bus on an adapter that
 5570                  * cannot perform the initiator role.
 5571                  */
 5572                 if (request_ccb != NULL) {
 5573                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5574                         xpt_done(request_ccb);
 5575                 }
 5576                 return;
 5577         }
 5578 
 5579         if (request_ccb == NULL) {
 5580                 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
 5581                 if (request_ccb == NULL) {
 5582                         xpt_print_path(path);
 5583                         printf("xpt_scan_lun: can't allocate CCB, can't "
 5584                                "continue\n");
 5585                         return;
 5586                 }
 5587                 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
 5588                 if (new_path == NULL) {
 5589                         xpt_print_path(path);
 5590                         printf("xpt_scan_lun: can't allocate path, can't "
 5591                                "continue\n");
 5592                         free(request_ccb, M_TEMP);
 5593                         return;
 5594                 }
 5595                 status = xpt_compile_path(new_path, xpt_periph,
 5596                                           path->bus->path_id,
 5597                                           path->target->target_id,
 5598                                           path->device->lun_id);
 5599 
 5600                 if (status != CAM_REQ_CMP) {
 5601                         xpt_print_path(path);
 5602                         printf("xpt_scan_lun: can't compile path, can't "
 5603                                "continue\n");
 5604                         free(request_ccb, M_TEMP);
 5605                         free(new_path, M_TEMP);
 5606                         return;
 5607                 }
 5608                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
 5609                 request_ccb->ccb_h.cbfcnp = xptscandone;
 5610                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5611                 request_ccb->crcn.flags = flags;
 5612         }
 5613 
 5614         s = splsoftcam();
 5615         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
 5616                 probe_softc *softc;
 5617 
 5618                 softc = (probe_softc *)old_periph->softc;
 5619                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5620                                   periph_links.tqe);
 5621         } else {
 5622                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
 5623                                           probestart, "probe",
 5624                                           CAM_PERIPH_BIO,
 5625                                           request_ccb->ccb_h.path, NULL, 0,
 5626                                           request_ccb);
 5627 
 5628                 if (status != CAM_REQ_CMP) {
 5629                         xpt_print_path(path);
 5630                         printf("xpt_scan_lun: cam_periph_alloc returned an "
 5631                                "error, can't continue probe\n");
 5632                         request_ccb->ccb_h.status = status;
 5633                         xpt_done(request_ccb);
 5634                 }
 5635         }
 5636         splx(s);
 5637 }
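      /*
       * Calling sketch (an inference from the code above, not
       * original commentary): when request_ccb is NULL, xpt_scan_lun()
       * allocates its own CCB and path and points completion at
       * xptscandone() below so the allocations are reclaimed; when
       * the caller supplies a CCB, completion is delivered back to
       * the caller via xpt_done() instead.
       */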
 5638 
 5639 static void
 5640 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
 5641 {
 5642         xpt_release_path(done_ccb->ccb_h.path);
 5643         free(done_ccb->ccb_h.path, M_TEMP);
 5644         free(done_ccb, M_TEMP);
 5645 }
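      /*
       * (Inference, not original commentary) xptscandone() only
       * completes the CCB/path pair that xpt_scan_lun() allocated
       * itself with M_TEMP, which is why both can be freed above
       * unconditionally.
       */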
 5646 
 5647 static cam_status
 5648 proberegister(struct cam_periph *periph, void *arg)
 5649 {
 5650         union ccb *request_ccb; /* CCB representing the probe request */
 5651         probe_softc *softc;
 5652 
 5653         request_ccb = (union ccb *)arg;
 5654         if (periph == NULL) {
 5655                 printf("proberegister: periph was NULL!!\n");
 5656                 return(CAM_REQ_CMP_ERR);
 5657         }
 5658 
 5659         if (request_ccb == NULL) {
 5660                 printf("proberegister: no probe CCB, "
 5661                        "can't register device\n");
 5662                 return(CAM_REQ_CMP_ERR);
 5663         }
 5664 
 5665         softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
 5666 
 5667         if (softc == NULL) {
 5668                 printf("proberegister: Unable to probe new device. "
 5669                        "Unable to allocate softc\n");                           
 5670                 return(CAM_REQ_CMP_ERR);
 5671         }
 5672         TAILQ_INIT(&softc->request_ccbs);
 5673         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5674                           periph_links.tqe);
 5675         softc->flags = 0;
 5676         periph->softc = softc;
 5677         cam_periph_acquire(periph);
 5678         /*
 5679          * Ensure we've waited at least a bus settle
 5680          * delay before attempting to probe the device.
 5681          * For HBAs that don't do bus resets, this won't make a difference.
 5682          */
 5683         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
 5684                                       scsi_delay);
 5685         probeschedule(periph);
 5686         return(CAM_REQ_CMP);
 5687 }
 5688 
 5689 static void
 5690 probeschedule(struct cam_periph *periph)
 5691 {
 5692         struct ccb_pathinq cpi;
 5693         union ccb *ccb;
 5694         probe_softc *softc;
 5695 
 5696         softc = (probe_softc *)periph->softc;
 5697         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 5698 
 5699         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
 5700         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5701         xpt_action((union ccb *)&cpi);
 5702 
 5703         /*
 5704          * If a device has gone away and another device, or the same one,
 5705          * is back in the same place, it should have a unit attention
 5706          * condition pending.  It will not report the unit attention in
 5707          * response to an inquiry, which may leave invalid transfer
 5708          * negotiations in effect.  The TUR will reveal the unit attention
 5709          * condition.  Only send the TUR for lun 0, since some devices 
 5710          * will get confused by commands other than inquiry to non-existent
 5711                  * luns.  If you think a device has gone away, start your scan from
 5712                  * lun 0.  This will ensure that any bogus transfer settings are
 5713          * invalidated.
 5714          *
 5715          * If we haven't seen the device before and the controller supports
 5716          * some kind of transfer negotiation, negotiate with the first
 5717          * sent command if no bus reset was performed at startup.  This
 5718          * ensures that the device is not confused by transfer negotiation
 5719          * settings left over by loader or BIOS action.
 5720          */
 5721         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 5722          && (ccb->ccb_h.target_lun == 0)) {
 5723                 softc->action = PROBE_TUR;
 5724         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
 5725               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
 5726                 proberequestdefaultnegotiation(periph);
 5727                 softc->action = PROBE_INQUIRY;
 5728         } else {
 5729                 softc->action = PROBE_INQUIRY;
 5730         }
 5731 
 5732         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
 5733                 softc->flags |= PROBE_NO_ANNOUNCE;
 5734         else
 5735                 softc->flags &= ~PROBE_NO_ANNOUNCE;
 5736 
 5737         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
 5738 }
 5739 
 5740 static void
 5741 probestart(struct cam_periph *periph, union ccb *start_ccb)
 5742 {
 5743         /* Probe the device that our peripheral driver points to */
 5744         struct ccb_scsiio *csio;
 5745         probe_softc *softc;
 5746 
 5747         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
 5748 
 5749         softc = (probe_softc *)periph->softc;
 5750         csio = &start_ccb->csio;
 5751 
 5752         switch (softc->action) {
 5753         case PROBE_TUR:
 5754         case PROBE_TUR_FOR_NEGOTIATION:
 5755         {
 5756                 scsi_test_unit_ready(csio,
 5757                                      /*retries*/4,
 5758                                      probedone,
 5759                                      MSG_SIMPLE_Q_TAG,
 5760                                      SSD_FULL_SIZE,
 5761                                      /*timeout*/60000);
 5762                 break;
 5763         }
 5764         case PROBE_INQUIRY:
 5765         case PROBE_FULL_INQUIRY:
 5766         {
 5767                 u_int inquiry_len;
 5768                 struct scsi_inquiry_data *inq_buf;
 5769 
 5770                 inq_buf = &periph->path->device->inq_data;
 5771                 /*
 5772                  * If the device is currently configured, we calculate an
 5773                  * MD5 checksum of the inquiry data, and if the serial number
 5774                  * length is greater than 0, add the serial number data
 5775                  * into the checksum as well.  Once the inquiry and the
 5776                  * serial number check finish, we attempt to figure out
 5777                  * whether we still have the same device.
 5778                  */
 5779                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
 5780                         
 5781                         MD5Init(&softc->context);
 5782                         MD5Update(&softc->context, (unsigned char *)inq_buf,
 5783                                   sizeof(struct scsi_inquiry_data));
 5784                         softc->flags |= PROBE_INQUIRY_CKSUM;
 5785                         if (periph->path->device->serial_num_len > 0) {
 5786                                 MD5Update(&softc->context,
 5787                                           periph->path->device->serial_num,
 5788                                           periph->path->device->serial_num_len);
 5789                                 softc->flags |= PROBE_SERIAL_CKSUM;
 5790                         }
 5791                         MD5Final(softc->digest, &softc->context);
 5792                 } 
 5793 
 5794                 if (softc->action == PROBE_INQUIRY)
 5795                         inquiry_len = SHORT_INQUIRY_LENGTH;
 5796                 else
 5797                         inquiry_len = inq_buf->additional_length
 5798                                     + offsetof(struct scsi_inquiry_data,
 5799                                                additional_length) + 1;
 5800 
 5801                 /*
 5802                  * Some parallel SCSI devices fail to send an
 5803                  * ignore wide residue message when dealing with
 5804                  * odd length inquiry requests.  Round up to be
 5805                  * safe.
 5806                  */
 5807                 inquiry_len = roundup2(inquiry_len, 2);
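                      /*
                       * Illustrative values (an annotation):
                       * roundup2(37, 2) is 38, while an already even
                       * length such as 36 is unchanged.
                       */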
 5808         
 5809                 scsi_inquiry(csio,
 5810                              /*retries*/4,
 5811                              probedone,
 5812                              MSG_SIMPLE_Q_TAG,
 5813                              (u_int8_t *)inq_buf,
 5814                              inquiry_len,
 5815                              /*evpd*/FALSE,
 5816                              /*page_code*/0,
 5817                              SSD_MIN_SIZE,
 5818                              /*timeout*/60 * 1000);
 5819                 break;
 5820         }
 5821         case PROBE_MODE_SENSE:
 5822         {
 5823                 void  *mode_buf;
 5824                 int    mode_buf_len;
 5825 
 5826                 mode_buf_len = sizeof(struct scsi_mode_header_6)
 5827                              + sizeof(struct scsi_mode_blk_desc)
 5828                              + sizeof(struct scsi_control_page);
 5829                 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
 5830                 if (mode_buf != NULL) {
 5831                         scsi_mode_sense(csio,
 5832                                         /*retries*/4,
 5833                                         probedone,
 5834                                         MSG_SIMPLE_Q_TAG,
 5835                                         /*dbd*/FALSE,
 5836                                         SMS_PAGE_CTRL_CURRENT,
 5837                                         SMS_CONTROL_MODE_PAGE,
 5838                                         mode_buf,
 5839                                         mode_buf_len,
 5840                                         SSD_FULL_SIZE,
 5841                                         /*timeout*/60000);
 5842                         break;
 5843                 }
 5844                 xpt_print_path(periph->path);
 5845                 printf("Unable to mode sense control page - malloc failure\n");
 5846                 softc->action = PROBE_SERIAL_NUM;
 5847         }
 5848         /* FALLTHROUGH */
 5849         case PROBE_SERIAL_NUM:
 5850         {
 5851                 struct scsi_vpd_unit_serial_number *serial_buf;
 5852                 struct cam_ed* device;
 5853 
 5854                 serial_buf = NULL;
 5855                 device = periph->path->device;
 5856                 device->serial_num = NULL;
 5857                 device->serial_num_len = 0;
 5858 
 5859                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
 5860                         serial_buf = (struct scsi_vpd_unit_serial_number *)
 5861                                 malloc(sizeof(*serial_buf), M_TEMP,
 5862                                         M_NOWAIT | M_ZERO);
 5863 
 5864                 if (serial_buf != NULL) {
 5865                         scsi_inquiry(csio,
 5866                                      /*retries*/4,
 5867                                      probedone,
 5868                                      MSG_SIMPLE_Q_TAG,
 5869                                      (u_int8_t *)serial_buf,
 5870                                      sizeof(*serial_buf),
 5871                                      /*evpd*/TRUE,
 5872                                      SVPD_UNIT_SERIAL_NUMBER,
 5873                                      SSD_MIN_SIZE,
 5874                                      /*timeout*/60 * 1000);
 5875                         break;
 5876                 }
 5877                 /*
 5878                  * We'll have to do without; let our probedone
 5879                  * routine finish up for us.
 5880                  */
 5881                 start_ccb->csio.data_ptr = NULL;
 5882                 probedone(periph, start_ccb);
 5883                 return;
 5884         }
 5885         }
 5886         xpt_action(start_ccb);
 5887 }
 5888 
 5889 static void
 5890 proberequestdefaultnegotiation(struct cam_periph *periph)
 5891 {
 5892         struct ccb_trans_settings cts;
 5893 
 5894         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
 5895         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 5896 #ifdef CAM_NEW_TRAN_CODE
 5897         cts.type = CTS_TYPE_USER_SETTINGS;
 5898 #else /* CAM_NEW_TRAN_CODE */
 5899         cts.flags = CCB_TRANS_USER_SETTINGS;
 5900 #endif /* CAM_NEW_TRAN_CODE */
 5901         xpt_action((union ccb *)&cts);
 5902         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 5903 #ifdef CAM_NEW_TRAN_CODE
 5904         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 5905 #else /* CAM_NEW_TRAN_CODE */
 5906         cts.flags &= ~CCB_TRANS_USER_SETTINGS;
 5907         cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
 5908 #endif /* CAM_NEW_TRAN_CODE */
 5909         xpt_action((union ccb *)&cts);
 5910 }
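      /*
       * Sketch of the pattern above (an annotation, not part of the
       * original source): the user/default transfer settings are read
       * and then written back as the current settings,
       *
       *	XPT_GET_TRAN_SETTINGS (user settings)  -> cts
       *	XPT_SET_TRAN_SETTINGS (current)        <- cts
       *
       * which primes a controller that has not yet negotiated with
       * the device.
       */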
 5911 
 5912 static void
 5913 probedone(struct cam_periph *periph, union ccb *done_ccb)
 5914 {
 5915         probe_softc *softc;
 5916         struct cam_path *path;
 5917         u_int32_t  priority;
 5918 
 5919         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
 5920 
 5921         softc = (probe_softc *)periph->softc;
 5922         path = done_ccb->ccb_h.path;
 5923         priority = done_ccb->ccb_h.pinfo.priority;
 5924 
 5925         switch (softc->action) {
 5926         case PROBE_TUR:
 5927         {
 5928                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 5929 
 5930                         if (cam_periph_error(done_ccb, 0,
 5931                                              SF_NO_PRINT, NULL) == ERESTART)
 5932                                 return;
 5933                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 5934                                 /* Don't wedge the queue */
 5935                                 xpt_release_devq(done_ccb->ccb_h.path,
 5936                                                  /*count*/1,
 5937                                                  /*run_queue*/TRUE);
 5938                 }
 5939                 softc->action = PROBE_INQUIRY;
 5940                 xpt_release_ccb(done_ccb);
 5941                 xpt_schedule(periph, priority);
 5942                 return;
 5943         }
 5944         case PROBE_INQUIRY:
 5945         case PROBE_FULL_INQUIRY:
 5946         {
 5947                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 5948                         struct scsi_inquiry_data *inq_buf;
 5949                         u_int8_t periph_qual;
 5950 
 5951                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
 5952                         inq_buf = &path->device->inq_data;
 5953 
 5954                         periph_qual = SID_QUAL(inq_buf);
 5955                         
 5956                         switch(periph_qual) {
 5957                         case SID_QUAL_LU_CONNECTED:
 5958                         {
 5959                                 u_int8_t len;
 5960 
 5961                                 /*
 5962                                  * We conservatively request only
 5963                                  * SHORT_INQUIRY_LENGTH bytes of inquiry
 5964                                  * information during our first try
 5965                                  * at sending an INQUIRY. If the device
 5966                                  * has more information to give,
 5967                                  * perform a second request specifying
 5968                                  * the amount of information the device
 5969                                  * is willing to give.
 5970                                  */
 5971                                 len = inq_buf->additional_length
 5972                                     + offsetof(struct scsi_inquiry_data,
 5973                                                additional_length) + 1;
 5974                                 if (softc->action == PROBE_INQUIRY
 5975                                  && len > SHORT_INQUIRY_LENGTH) {
 5976                                         softc->action = PROBE_FULL_INQUIRY;
 5977                                         xpt_release_ccb(done_ccb);
 5978                                         xpt_schedule(periph, priority);
 5979                                         return;
 5980                                 }
 5981 
 5982                                 xpt_find_quirk(path->device);
 5983 
 5984 #ifdef CAM_NEW_TRAN_CODE
 5985                                 xpt_devise_transport(path);
 5986 #endif /* CAM_NEW_TRAN_CODE */
 5987                                 if ((inq_buf->flags & SID_CmdQue) != 0)
 5988                                         softc->action = PROBE_MODE_SENSE;
 5989                                 else
 5990                                         softc->action = PROBE_SERIAL_NUM;
 5991 
 5992                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
 5993 
 5994                                 xpt_release_ccb(done_ccb);
 5995                                 xpt_schedule(periph, priority);
 5996                                 return;
 5997                         }
 5998                         default:
 5999                                 break;
 6000                         }
 6001                 } else if (cam_periph_error(done_ccb, 0,
 6002                                             done_ccb->ccb_h.target_lun > 0
 6003                                             ? SF_RETRY_UA|SF_QUIET_IR
 6004                                             : SF_RETRY_UA,
 6005                                             &softc->saved_ccb) == ERESTART) {
 6006                         return;
 6007                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6008                         /* Don't wedge the queue */
 6009                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6010                                          /*run_queue*/TRUE);
 6011                 }
 6012                 /*
 6013                  * If we get to this point, we got an error status back
 6014                  * from the inquiry and the error status doesn't require
 6015                  * automatically retrying the command.  Therefore, the
 6016                  * inquiry failed.  If we had inquiry information before
 6017                  * for this device, but this latest inquiry command failed,
 6018                  * the device has probably gone away.  If this device isn't
 6019                  * already marked unconfigured, notify the peripheral
 6020                  * drivers that this device is no more.
 6021                  */
 6022                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 6023                         /* Send the async notification. */
 6024                         xpt_async(AC_LOST_DEVICE, path, NULL);
 6025 
 6026                 xpt_release_ccb(done_ccb);
 6027                 break;
 6028         }
 6029         case PROBE_MODE_SENSE:
 6030         {
 6031                 struct ccb_scsiio *csio;
 6032                 struct scsi_mode_header_6 *mode_hdr;
 6033 
 6034                 csio = &done_ccb->csio;
 6035                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
 6036                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 6037                         struct scsi_control_page *page;
 6038                         u_int8_t *offset;
 6039 
 6040                         offset = ((u_int8_t *)&mode_hdr[1])
 6041                             + mode_hdr->blk_desc_len;
 6042                         page = (struct scsi_control_page *)offset;
 6043                         path->device->queue_flags = page->queue_flags;
 6044                 } else if (cam_periph_error(done_ccb, 0,
 6045                                             SF_RETRY_UA|SF_NO_PRINT,
 6046                                             &softc->saved_ccb) == ERESTART) {
 6047                         return;
 6048                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6049                         /* Don't wedge the queue */
 6050                         xpt_release_devq(done_ccb->ccb_h.path,
 6051                                          /*count*/1, /*run_queue*/TRUE);
 6052                 }
 6053                 xpt_release_ccb(done_ccb);
 6054                 free(mode_hdr, M_TEMP);
 6055                 softc->action = PROBE_SERIAL_NUM;
 6056                 xpt_schedule(periph, priority);
 6057                 return;
 6058         }
 6059         case PROBE_SERIAL_NUM:
 6060         {
 6061                 struct ccb_scsiio *csio;
 6062                 struct scsi_vpd_unit_serial_number *serial_buf;
 6063                 u_int32_t  priority;
 6064                 int changed;
 6065                 int have_serialnum;
 6066 
 6067                 changed = 1;
 6068                 have_serialnum = 0;
 6069                 csio = &done_ccb->csio;
 6070                 priority = done_ccb->ccb_h.pinfo.priority;
 6071                 serial_buf =
 6072                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
 6073 
 6074                 /* Clean up from previous instance of this device */
 6075                 if (path->device->serial_num != NULL) {
 6076                         free(path->device->serial_num, M_CAMXPT);
 6077                         path->device->serial_num = NULL;
 6078                         path->device->serial_num_len = 0;
 6079                 }
 6080 
 6081                 if (serial_buf == NULL) {
 6082                         /*
 6083                          * Don't process the command as it was never sent
 6084                          */
 6085                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
 6086                         && (serial_buf->length > 0)) {
 6087 
 6088                         have_serialnum = 1;
 6089                         path->device->serial_num =
 6090                                 (u_int8_t *)malloc((serial_buf->length + 1),
 6091                                                    M_CAMXPT, M_NOWAIT);
 6092                         if (path->device->serial_num != NULL) {
 6093                                 bcopy(serial_buf->serial_num,
 6094                                       path->device->serial_num,
 6095                                       serial_buf->length);
 6096                                 path->device->serial_num_len =
 6097                                     serial_buf->length;
 6098                                 path->device->serial_num[serial_buf->length]
 6099                                     = '\0';
 6100                         }
 6101                 } else if (cam_periph_error(done_ccb, 0,
 6102                                             SF_RETRY_UA|SF_NO_PRINT,
 6103                                             &softc->saved_ccb) == ERESTART) {
 6104                         return;
 6105                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6106                         /* Don't wedge the queue */
 6107                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6108                                          /*run_queue*/TRUE);
 6109                 }
 6110                 
 6111                 /*
 6112                  * Let's see if we have seen this device before.
 6113                  */
 6114                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
 6115                         MD5_CTX context;
 6116                         u_int8_t digest[16];
 6117 
 6118                         MD5Init(&context);
 6119                         
 6120                         MD5Update(&context,
 6121                                   (unsigned char *)&path->device->inq_data,
 6122                                   sizeof(struct scsi_inquiry_data));
 6123 
 6124                         if (have_serialnum)
 6125                                 MD5Update(&context, serial_buf->serial_num,
 6126                                           serial_buf->length);
 6127 
 6128                         MD5Final(digest, &context);
 6129                         if (bcmp(softc->digest, digest, 16) == 0)
 6130                                 changed = 0;
 6131 
 6132                         /*
 6133                          * XXX Do we need to do a TUR in order to ensure
 6134                          *     that the device really hasn't changed???
 6135                          */
 6136                         if ((changed != 0)
 6137                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
 6138                                 xpt_async(AC_LOST_DEVICE, path, NULL);
 6139                 }
 6140                 if (serial_buf != NULL)
 6141                         free(serial_buf, M_TEMP);
 6142 
 6143                 if (changed != 0) {
 6144                         /*
 6145                          * Now we have all the information needed
 6146                          * to safely perform transfer negotiations.
 6147                          * Controllers don't perform
 6148                          * any negotiation or tagged queuing until
 6149                          * after the first XPT_SET_TRAN_SETTINGS ccb is
 6150                          * received.  So, on a new device, just retrieve
 6151                          * the user settings, and set them as the current
 6152                          * settings to set the device up.
 6153                          */
 6154                         proberequestdefaultnegotiation(periph);
 6155                         xpt_release_ccb(done_ccb);
 6156 
 6157                         /*
 6158                          * Perform a TUR to allow the controller to
 6159                          * perform any necessary transfer negotiation.
 6160                          */
 6161                         softc->action = PROBE_TUR_FOR_NEGOTIATION;
 6162                         xpt_schedule(periph, priority);
 6163                         return;
 6164                 }
 6165                 xpt_release_ccb(done_ccb);
 6166                 break;
 6167         }
 6168         case PROBE_TUR_FOR_NEGOTIATION:
 6169                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6170                         /* Don't wedge the queue */
 6171                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6172                                          /*run_queue*/TRUE);
 6173                 }
 6174 
 6175                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
 6176 
 6177                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
 6178                         /* Inform the XPT that a new device has been found */
 6179                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 6180                         xpt_action(done_ccb);
 6181 
 6182                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
 6183                                   done_ccb);
 6184                 }
 6185                 xpt_release_ccb(done_ccb);
 6186                 break;
 6187         }
 6188         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 6189         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
 6190         done_ccb->ccb_h.status = CAM_REQ_CMP;
 6191         xpt_done(done_ccb);
 6192         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
 6193                 cam_periph_invalidate(periph);
 6194                 cam_periph_release(periph);
 6195         } else {
 6196                 probeschedule(periph);
 6197         }
 6198 }
 6199 
 6200 static void
 6201 probecleanup(struct cam_periph *periph)
 6202 {
 6203         free(periph->softc, M_TEMP);
 6204 }
 6205 
 6206 static void
 6207 xpt_find_quirk(struct cam_ed *device)
 6208 {
 6209         caddr_t match;
 6210 
 6211         match = cam_quirkmatch((caddr_t)&device->inq_data,
 6212                                (caddr_t)xpt_quirk_table,
 6213                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
 6214                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
 6215 
 6216         if (match == NULL)
 6217                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
 6218 
 6219         device->quirk = (struct xpt_quirk_entry *)match;
 6220 }
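      /*
       * Note (an inference from the panic above, not original
       * commentary): xpt_quirk_table is expected to end with a
       * catch-all wildcard entry, so cam_quirkmatch() returning NULL
       * for valid inquiry data indicates a corrupted table.
       */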
 6221 
 6222 static int
 6223 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
 6224 {
 6225         int error, new_value;
 6226 
 6227         new_value = cam_srch_hi;
 6228         error = sysctl_handle_int(oidp, &new_value, sizeof(new_value), req);
 6229         if (error != 0 || req->newptr == NULL)
 6230                 return (error);
 6231         if (new_value == 0 || new_value == 1) {
 6232                 cam_srch_hi = new_value;
 6233                 return (0);
 6234         } else {
 6235                 return (EINVAL);
 6236         }
 6237 }
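      /*
       * Usage sketch (assuming this handler is attached to the
       * kern.cam.cam_srch_hi sysctl OID, as elsewhere in this file):
       *
       *	# sysctl kern.cam.cam_srch_hi=1
       *
       * Only 0 and 1 are accepted; anything else returns EINVAL.
       */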
 6238 
 6239 #ifdef CAM_NEW_TRAN_CODE
 6240 
 6241 static void
 6242 xpt_devise_transport(struct cam_path *path)
 6243 {
 6244         struct ccb_pathinq cpi;
 6245         struct ccb_trans_settings cts;
 6246         struct scsi_inquiry_data *inq_buf;
 6247 
 6248         /* Get transport information from the SIM */
 6249         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 6250         cpi.ccb_h.func_code = XPT_PATH_INQ;
 6251         xpt_action((union ccb *)&cpi);
 6252 
 6253         inq_buf = NULL;
 6254         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
 6255                 inq_buf = &path->device->inq_data;
 6256         path->device->protocol = PROTO_SCSI;
 6257         path->device->protocol_version =
 6258             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
 6259         path->device->transport = cpi.transport;
 6260         path->device->transport_version = cpi.transport_version;
 6261 
 6262         /*
 6263          * Any device not using SPI3 features should
 6264          * be considered SPI2 or lower.
 6265          */
 6266         if (inq_buf != NULL) {
 6267                 if (path->device->transport == XPORT_SPI
 6268                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
 6269                  && path->device->transport_version > 2)
 6270                         path->device->transport_version = 2;
 6271         } else {
 6272                 struct cam_ed* otherdev;
 6273 
 6274                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
 6275                      otherdev != NULL;
 6276                      otherdev = TAILQ_NEXT(otherdev, links)) {
 6277                         if (otherdev != path->device)
 6278                                 break;
 6279                 }
 6280                     
 6281                 if (otherdev != NULL) {
 6282                         /*
 6283                          * Initially assume the same versioning as
 6284                          * prior luns for this target.
 6285                          */
 6286                         path->device->protocol_version =
 6287                             otherdev->protocol_version;
 6288                         path->device->transport_version =
 6289                             otherdev->transport_version;
 6290                 } else {
 6291                         /* Until we know better, opt for safety */
 6292                         path->device->protocol_version = 2;
 6293                         if (path->device->transport == XPORT_SPI)
 6294                                 path->device->transport_version = 2;
 6295                         else
 6296                                 path->device->transport_version = 0;
 6297                 }
 6298         }
 6299 
 6300         /*
 6301          * XXX
 6302          * For a device compliant with SPC-2 we should be able
 6303          * to determine the transport version supported by
 6304          * scrutinizing the version descriptors in the
 6305          * inquiry buffer.
 6306          */
 6307 
 6308         /* Tell the controller what we think */
 6309         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 6310         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 6311         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 6312         cts.transport = path->device->transport;
 6313         cts.transport_version = path->device->transport_version;
 6314         cts.protocol = path->device->protocol;
 6315         cts.protocol_version = path->device->protocol_version;
 6316         cts.proto_specific.valid = 0;
 6317         cts.xport_specific.valid = 0;
 6318         xpt_action((union ccb *)&cts);
 6319 }
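      /*
       * Worked example of the SPI clamping above (an annotation, not
       * part of the original source): a device on a SPI bus whose
       * inquiry data has no SID_SPI_MASK bits set is assumed not to
       * speak SPI3, so a transport_version of 3 or higher reported by
       * the SIM is clamped down to 2.
       */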
 6320 
 6321 static void
 6322 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
 6323                           int async_update)
 6324 {
 6325         struct  ccb_pathinq cpi;
 6326         struct  ccb_trans_settings cur_cts;
 6327         struct  ccb_trans_settings_scsi *scsi;
 6328         struct  ccb_trans_settings_scsi *cur_scsi;
 6329         struct  cam_sim *sim;
 6330         struct  scsi_inquiry_data *inq_data;
 6331 
 6332         if (device == NULL) {
 6333                 cts->ccb_h.status = CAM_PATH_INVALID;
 6334                 xpt_done((union ccb *)cts);
 6335                 return;
 6336         }
 6337 
 6338         if (cts->protocol == PROTO_UNKNOWN
 6339          || cts->protocol == PROTO_UNSPECIFIED) {
 6340                 cts->protocol = device->protocol;
 6341                 cts->protocol_version = device->protocol_version;
 6342         }
 6343 
 6344         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
 6345          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
 6346                 cts->protocol_version = device->protocol_version;
 6347 
 6348         if (cts->protocol != device->protocol) {
 6349                 xpt_print_path(cts->ccb_h.path);
 6350                 printf("Uninitialized Protocol %x:%x?\n",
 6351                        cts->protocol, device->protocol);
 6352                 cts->protocol = device->protocol;
 6353         }
 6354 
 6355         if (cts->protocol_version > device->protocol_version) {
 6356                 if (bootverbose) {
 6357                         xpt_print_path(cts->ccb_h.path);
 6358                         printf("Down revving Protocol Version from %d to %d?\n",
 6359                                cts->protocol_version, device->protocol_version);
 6360                 }
 6361                 cts->protocol_version = device->protocol_version;
 6362         }
 6363 
 6364         if (cts->transport == XPORT_UNKNOWN
 6365          || cts->transport == XPORT_UNSPECIFIED) {
 6366                 cts->transport = device->transport;
 6367                 cts->transport_version = device->transport_version;
 6368         }
 6369 
 6370         if (cts->transport_version == XPORT_VERSION_UNKNOWN
 6371          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
 6372                 cts->transport_version = device->transport_version;
 6373 
 6374         if (cts->transport != device->transport) {
 6375                 xpt_print_path(cts->ccb_h.path);
 6376                 printf("Uninitialized Transport %x:%x?\n",
 6377                        cts->transport, device->transport);
 6378                 cts->transport = device->transport;
 6379         }
 6380 
 6381         if (cts->transport_version > device->transport_version) {
 6382                 if (bootverbose) {
 6383                         xpt_print_path(cts->ccb_h.path);
 6384                         printf("Down revving Transport Version from %d to %d?\n",
 6385                                cts->transport_version,
 6386                                device->transport_version);
 6387                 }
 6388                 cts->transport_version = device->transport_version;
 6389         }
 6390 
 6391         sim = cts->ccb_h.path->bus->sim;
 6392 
 6393         /*
 6394          * Nothing more of interest to do unless
 6395          * this is a device connected via the
 6396          * SCSI protocol.
 6397          */
 6398         if (cts->protocol != PROTO_SCSI) {
 6399                 if (async_update == FALSE) 
 6400                         (*(sim->sim_action))(sim, (union ccb *)cts);
 6401                 return;
 6402         }
 6403 
 6404         inq_data = &device->inq_data;
 6405         scsi = &cts->proto_specific.scsi;
 6406         xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
 6407         cpi.ccb_h.func_code = XPT_PATH_INQ;
 6408         xpt_action((union ccb *)&cpi);
 6409 
 6410         /* SCSI specific sanity checking */
 6411         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
 6412          || (inq_data->flags & SID_CmdQue) == 0
 6413          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
 6414          || (device->quirk->mintags == 0)) {
 6415                 /*
 6416                  * Can't tag on hardware that doesn't support tags,
 6417                  * doesn't have it enabled, or has broken tag support.
 6418                  */
 6419                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6420         }
 6421 
 6422         if (async_update == FALSE) {
 6423                 /*
 6424                  * Perform sanity checking against what the
 6425                  * controller and device can do.
 6426                  */
 6427                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
 6428                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 6429                 cur_cts.type = cts->type;
 6430                 xpt_action((union ccb *)&cur_cts);
 6431 
 6432                 cur_scsi = &cur_cts.proto_specific.scsi;
 6433                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
 6434                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6435                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
 6436                 }
 6437                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
 6438                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6439         }
 6440 
 6441         /* SPI specific sanity checking */
 6442         if (cts->transport == XPORT_SPI && async_update == FALSE) {
 6443                 u_int spi3caps;
 6444                 struct ccb_trans_settings_spi *spi;
 6445                 struct ccb_trans_settings_spi *cur_spi;
 6446 
 6447                 spi = &cts->xport_specific.spi;
 6448 
 6449                 cur_spi = &cur_cts.xport_specific.spi;
 6450 
 6451                 /* Fill in any gaps in what the user gave us */
 6452                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
 6453                         spi->sync_period = cur_spi->sync_period;
 6454                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
 6455                         spi->sync_period = 0;
 6456                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
 6457                         spi->sync_offset = cur_spi->sync_offset;
 6458                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
 6459                         spi->sync_offset = 0;
 6460                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
 6461                         spi->ppr_options = cur_spi->ppr_options;
 6462                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
 6463                         spi->ppr_options = 0;
 6464                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
 6465                         spi->bus_width = cur_spi->bus_width;
 6466                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
 6467                         spi->bus_width = 0;
 6468                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
 6469                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
 6470                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
 6471                 }
 6472                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
 6473                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
 6474                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6475                   && (inq_data->flags & SID_Sync) == 0
 6476                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
 6477                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
 6478                  || (cur_spi->sync_offset == 0)
 6479                  || (cur_spi->sync_period == 0)) {
 6480                         /* Force async */
 6481                         spi->sync_period = 0;
 6482                         spi->sync_offset = 0;
 6483                 }
 6484 
 6485                 switch (spi->bus_width) {
 6486                 case MSG_EXT_WDTR_BUS_32_BIT:
 6487                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6488                           || (inq_data->flags & SID_WBus32) != 0
 6489                           || cts->type == CTS_TYPE_USER_SETTINGS)
 6490                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
 6491                                 break;
 6492                         /* Fall Through to 16-bit */
 6493                 case MSG_EXT_WDTR_BUS_16_BIT:
 6494                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6495                           || (inq_data->flags & SID_WBus16) != 0
 6496                           || cts->type == CTS_TYPE_USER_SETTINGS)
 6497                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
 6498                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
 6499                                 break;
 6500                         }
 6501                         /* Fall Through to 8-bit */
 6502                 default: /* Unknown bus width; treat as 8-bit */
 6503                 case MSG_EXT_WDTR_BUS_8_BIT:
 6504                         /* All targets can do this */
 6505                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
 6506                         break;
 6507                 }
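
                      /*
                       * [Editor's sketch: a hypothetical helper, not
                       * part of cam_xpt.c, shown only to restate the
                       * fall-through logic above.]  The switch clamps
                       * the requested width to the widest setting both
                       * the HBA and (for current settings) the device
                       * support:
                       *
                       *      static u_int
                       *      clamp_width(u_int req, int dev16, int dev32,
                       *                  u_int hba_inquiry)
                       *      {
                       *              if (req >= MSG_EXT_WDTR_BUS_32_BIT
                       *               && dev32
                       *               && (hba_inquiry & PI_WIDE_32) != 0)
                       *                      return (MSG_EXT_WDTR_BUS_32_BIT);
                       *              if (req >= MSG_EXT_WDTR_BUS_16_BIT
                       *               && dev16
                       *               && (hba_inquiry & PI_WIDE_16) != 0)
                       *                      return (MSG_EXT_WDTR_BUS_16_BIT);
                       *              return (MSG_EXT_WDTR_BUS_8_BIT);
                       *      }
                       *
                       * where dev16/dev32 stand for "INQUIRY data is
                       * missing, user settings were requested, or the
                       * device sets SID_WBus16/SID_WBus32".
                       */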
 6508 
 6509                 spi3caps = cpi.xport_specific.spi.ppr_options;
 6510                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6511                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
 6512                         spi3caps &= inq_data->spi3data;
 6513 
 6514                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
 6515                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
 6516 
 6517                 if ((spi3caps & SID_SPI_IUS) == 0)
 6518                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
 6519 
 6520                 if ((spi3caps & SID_SPI_QAS) == 0)
 6521                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
 6522 
 6523                 /* PPR protocol options (DT, IU, QAS) require a wide bus */
 6524                 if (spi->bus_width == 0)
 6525                         spi->ppr_options = 0;
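
                      /*
                       * [Editor's note: worked example, not in the
                       * original.]  If the HBA advertises DT, IU and
                       * QAS but the device's INQUIRY spi3data reports
                       * only DT, the masking above leaves at most
                       * MSG_EXT_PPR_DT_REQ set - and even that is
                       * cleared just above if the bus ended up narrow.
                       */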
 6526 
 6527                 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
 6528                         /*
 6529                          * Can't tag queue without disconnection.
 6530                          */
 6531                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6532                         scsi->valid |= CTS_SCSI_VALID_TQ;
 6533                 }
 6534 
 6535                 /*
 6536                  * If we are currently performing tagged transactions to
 6537                  * this device and want to change its negotiation parameters,
 6538                  * go non-tagged for a bit to give the controller a chance to
 6539                  * negotiate unhampered by tag messages.
 6540                  */
 6541                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
 6542                  && (device->inq_flags & SID_CmdQue) != 0
 6543                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
 6544                  && (spi->valid & (CTS_SPI_VALID_SYNC_RATE|
 6545                                    CTS_SPI_VALID_SYNC_OFFSET|
 6546                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
 6547                         xpt_toggle_tags(cts->ccb_h.path);
 6548         }
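
              /*
               * [Editor's sketch: hypothetical caller, not part of the
               * original file.]  A request that renegotiates sync
               * parameters on a device currently using tagged queueing
               * takes the xpt_toggle_tags() path above, e.g. (reusing
               * the "neg" CCB from the earlier sketch):
               *
               *      neg.transport = XPORT_SPI;
               *      neg.type = CTS_TYPE_CURRENT_SETTINGS;
               *      neg.xport_specific.spi.sync_period = 0x0c;
               *      neg.xport_specific.spi.sync_offset = 15;
               *      neg.xport_specific.spi.valid =
               *          CTS_SPI_VALID_SYNC_RATE |
               *          CTS_SPI_VALID_SYNC_OFFSET;
               *      xpt_action((union ccb *)&neg);
               *
               * Tags are briefly disabled so the width/sync negotiation
               * messages are not interleaved with tag messages.
               */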
 6549 
 6550         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
 6551          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
 6552                 int device_tagenb;
 6553 
 6554                 /*
 6555                  * If we are transitioning from tags to no-tags or
 6556                  * vice-versa, we need to carefully freeze and restart
 6557                  * the queue so that we don't overlap tagged and non-tagged
 6558                  * commands.  We also temporarily stop tags if there is
 6559                  * a change in transfer negotiation settings to allow
 6560                  * "tag-less" negotiation.
 6561                  */
 6562                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6563                  || (device->inq_flags & SID_CmdQue) != 0)
 6564                         device_tagenb = TRUE;
 6565                 else
 6566                         device_tagenb = FALSE;
 6567 
 6568                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
 6569                   && device_tagenb == FALSE)
 6570                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
 6571                   && device_tagenb == TRUE)) {
 6572 
 6573                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
 6574                                 /*
 6575                                  * Delay change to use tags until after a
 6576                                  * few commands have gone to this device so
 6577                                  * the controller has time to perform transfer
 6578                                  * negotiations without tag messages getting
 6579                                  * in the way.
 6580                                  */
 6581                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
 6582                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
 6583