FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c

/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.2/sys/cam/cam_xpt.c 162582 2006-09-23 18:42:08Z mjacob $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
        SLIST_ENTRY(async_node) links;
        u_int32_t       event_enable;   /* Async Event enables */
        void            (*callback)(void *arg, u_int32_t code,
                                    struct cam_path *path, void *args);
        void            *callback_arg;
};
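
/*
 * Illustration only, a minimal sketch of how a driver typically causes one
 * of these blocks to be created: fill out a ccb_setasync and push it through
 * xpt_action() with the XPT_SASYNC_CB function code.  The path argument and
 * "example_async_cb" handler are placeholders, not part of this file.
 */
#if 0
static void example_async_cb(void *arg, u_int32_t code,
                             struct cam_path *path, void *args);

static void
example_register_async(struct cam_path *path)
{
        struct ccb_setasync csa;

        xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
        csa.callback = example_async_cb;
        csa.callback_arg = NULL;
        xpt_action((union ccb *)&csa);
}
#endif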

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
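
/*
 * Because of the #ifndef above, the default can be overridden at build
 * time; a sketch, assuming the option is carried in opt_cam.h by the
 * kernel config machinery:
 *
 *	options 	CAM_MAX_HIGHPOWER=8
 */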

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
        cam_pinfo pinfo;
        struct    cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
        TAILQ_ENTRY(cam_ed) links;
        struct  cam_ed_qinfo alloc_ccb_entry;
        struct  cam_ed_qinfo send_ccb_entry;
        struct  cam_et   *target;
        lun_id_t         lun_id;
        struct  camq drvq;              /*
                                         * Queue of type drivers wanting to do
                                         * work on this device.
                                         */
        struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
        struct  async_list asyncs;      /* Async callback info for this B/T/L */
        struct  periph_list periphs;    /* All attached devices */
        u_int   generation;             /* Generation number */
        struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
        struct  xpt_quirk_entry *quirk; /* Oddities about this device */
                                        /* Storage for the inquiry data */
#ifdef CAM_NEW_TRAN_CODE
        cam_proto        protocol;
        u_int            protocol_version;
        cam_xport        transport;
        u_int            transport_version;
#endif /* CAM_NEW_TRAN_CODE */
        struct           scsi_inquiry_data inq_data;
        u_int8_t         inq_flags;     /*
                                         * Current settings for inquiry flags.
                                         * This allows us to override settings
                                         * like disconnection and tagged
                                         * queuing for a device.
                                         */
        u_int8_t         queue_flags;   /* Queue flags from the control page */
        u_int8_t         serial_num_len;
        u_int8_t        *serial_num;
        u_int32_t        qfrozen_cnt;
        u_int32_t        flags;
#define CAM_DEV_UNCONFIGURED            0x01
#define CAM_DEV_REL_TIMEOUT_PENDING     0x02
#define CAM_DEV_REL_ON_COMPLETE         0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
#define CAM_DEV_TAG_AFTER_COUNT         0x20
#define CAM_DEV_INQUIRY_DATA_VALID      0x40
        u_int32_t        tag_delay_count;
#define CAM_TAG_DELAY_COUNT             5
        u_int32_t        tag_saved_openings;
        u_int32_t        refcount;
        struct           callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
        TAILQ_HEAD(, cam_ed) ed_entries;
        TAILQ_ENTRY(cam_et) links;
        struct  cam_eb  *bus;
        target_id_t     target_id;
        u_int32_t       refcount;
        u_int           generation;
        struct          timeval last_reset;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
        TAILQ_HEAD(, cam_et) et_entries;
        TAILQ_ENTRY(cam_eb)  links;
        path_id_t            path_id;
        struct cam_sim       *sim;
        struct timeval       last_reset;
        u_int32_t            flags;
#define CAM_EB_RUNQ_SCHEDULED   0x01
        u_int32_t            refcount;
        u_int                generation;
};
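
/*
 * Illustration only: how the three tables above nest.  Real code must use
 * the xpt*traverse() helpers declared later in this file, which cope with
 * generation counts; this sketch ignores locking entirely and refers to
 * xpt_busses, defined further below.
 */
#if 0
static void
example_walk_edt(void)
{
        struct cam_eb *bus;
        struct cam_et *target;
        struct cam_ed *device;

        TAILQ_FOREACH(bus, &xpt_busses, links)
                TAILQ_FOREACH(target, &bus->et_entries, links)
                        TAILQ_FOREACH(device, &target->ed_entries, links)
                                printf("bus %u target %u lun %u\n",
                                       bus->path_id, target->target_id,
                                       device->lun_id);
}
#endif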

struct cam_path {
        struct cam_periph *periph;
        struct cam_eb     *bus;
        struct cam_et     *target;
        struct cam_ed     *device;
};

struct xpt_quirk_entry {
        struct scsi_inquiry_pattern inq_pat;
        u_int8_t quirks;
#define CAM_QUIRK_NOLUNS        0x01
#define CAM_QUIRK_NOSERIAL      0x02
#define CAM_QUIRK_HILUNS        0x04
#define CAM_QUIRK_NOHILUNS      0x08
        u_int mintags;
        u_int maxtags;
};

static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");

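/*
 * Usage sketch: the knob above can be preset from the loader or flipped
 * at runtime, e.g.:
 *
 *	kern.cam.cam_srch_hi="1"	(in /boot/loader.conf)
 *	sysctl kern.cam.cam_srch_hi=1	(on a running system)
 */
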
#define CAM_SCSI2_MAXLUN        8
/*
 * If we're not quirked to search <= the first 8 luns
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 8.
 */
#define CAN_SRCH_HI_SPARSE(dv)                          \
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define CAN_SRCH_HI_DENSE(dv)                           \
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
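
/*
 * Worked example of the two macros: a bare SCSI-3 disk (no quirk bits set)
 * passes CAN_SRCH_HI_DENSE() unconditionally but CAN_SRCH_HI_SPARSE() only
 * when cam_srch_hi is nonzero; CAM_QUIRK_HILUNS makes both pass regardless
 * of ANSI revision, and CAM_QUIRK_NOHILUNS vetoes both.
 */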

typedef enum {
        XPT_FLAG_OPEN           = 0x01
} xpt_flags;

struct xpt_softc {
        xpt_flags       flags;
        u_int32_t       generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Unfortunately, the Quantum Atlas III has the same
                 * problem as the Atlas II drives above.
                 * Reported by: "Johan Granlund" <johan@granlund.nu>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM39100TD-SW N1B0
                 *
                 * It's possible that Quantum will fix the problem in later
                 * firmware revisions.  If that happens, the quirk entry
                 * will need to be made specific to the firmware revisions
                 * with the problem.
                 *
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * 18 Gig Atlas III, same problem as the 9G version.
                 * Reported by: Andre Albsmeier
                 *              <andre.albsmeier@mchp.siemens.de>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM318000TD-S N491
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * Broken tagged queuing drive
                 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
                 *         and: Martin Renters <martin@tdc.on.ca>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
                /*
                 * The Seagate Medalist Pro drives have very poor write
                 * performance with anything more than 2 tags.
                 *
                 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
                 * Drive:  <SEAGATE ST36530N 1444>
                 *
                 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
                 * Drive:  <SEAGATE ST34520W 1281>
                 *
                 * No one has actually reported that the 9G version
                 * (ST39140*) of the Medalist Pro has the same problem, but
                 * we're assuming that it does because the 4G and 6.5G
                 * versions of the drive are broken.
                 */
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  Write performance
                 * steadily drops off with more and more concurrent
                 * transactions.  Best sequential write performance with
                 * tagged queueing turned off and write caching turned on.
                 *
                 * PR:  kern/10398
                 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
                 * Drive:  DCAS-34330 w/ "S65A" firmware.
                 *
                 * The drive with the problem had the "S65A" firmware
                 * revision, and has also been reported (by Stephen J.
                 * Roznowski <sjr@home.net>) for a drive with the "S61A"
                 * firmware revision.
                 *
                 * Although no one has reported problems with the 2 gig
                 * version of the DCAS drive, the assumption is that it
                 * has the same problems as the 4 gig version.  Therefore
                 * this quirk entry disables tagged queueing for all
                 * DCAS drives.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Does not support other than LUN 0 */
                { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                /*
                 * Broken tagged queuing drive.
                 * Submitted by:
                 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
                 * in PR kern/9535
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Doesn't handle queue full condition correctly,
                 * so we need to limit maxtags to what the device
                 * can handle instead of determining this automatically.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
                /*quirks*/0, /*mintags*/2, /*maxtags*/32
        },
        {
                /* Really only one LUN */
                { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* I can't believe we need a quirk for DPT volumes. */
                { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
                /*mintags*/0, /*maxtags*/255
        },
        {
                /*
                 * Many Sony CDROM drives don't like multi-LUN probing.
                 */
                { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * This drive doesn't like multiple LUN probing.
                 * Submitted by:  Parag Patel <parag@cgt.com>
                 */
                { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * The 8200 doesn't like multi-lun probing, and probably
                 * doesn't like serial number requests either.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "EXB-8200*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Let's try the same as above, but for a drive that says
                 * it's an IPL-6860 but is actually an EXB 8200.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "IPL-6860*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * These Hitachi drives don't like multi-lun probing.
                 * The PR submitter has a DK319H, but says that the Linux
                 * kernel has a similar work-around for the DK312 and DK314,
                 * so all DK31* drives are quirked here.
                 * PR:            misc/18793
                 * Submitted by:  Paul Haddad <paul@pth.com>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                /*
                 * The Hitachi CJ series with J8A8 firmware apparently has
                 * problems with tagged commands.
                 * PR: 23536
                 * Reported by: amagai@nue.org
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * These are the large storage arrays.
                 * Submitted by:  William Carrel <william.carrel@infospace.com>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
                CAM_QUIRK_HILUNS, 2, 1024
        },
        {
                /*
                 * This old revision of the TDC3600 is also SCSI-1, and
                 * hangs upon serial number probing.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
                        " TDC 3600", "U07:"
                },
                CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Maxtor Personal Storage 3000XT (Firewire)
                 * hangs upon serial number probing.
                 */
                {
                        T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
                        "1394 storage", "*"
                },
                CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
                        "CP150", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
                        "96X2*", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* TeraSolutions special settings for TRC-22 RAID */
                { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
                  /*quirks*/0, /*mintags*/55, /*maxtags*/255
        },
        {
                /* Veritas Storage Appliance */
                { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
                  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
        },
        {
                /*
                 * Would respond to all LUNs.  Device type and removable
                 * flag are jumper-selectable.
                 */
                { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
                  "Tahiti 1", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* EasyRAID E5A aka. areca ARC-6010 */
                { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
                  CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Default tagged queuing parameters for all devices */
                {
                  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                /*quirks*/0, /*mintags*/2, /*maxtags*/255
        },
};

static const int xpt_quirk_table_size =
        sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);

typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth      depth;
        void                    *tr_func;
        void                    *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static struct mtx cam_bioq_lock;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;      /*
                                 * Maximum size of ccb pool.  Modified as
                                 * devices are added/removed or have their
                                 * opening counts changed.
                                 */
static u_int xpt_ccb_count;     /* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
        probe_periph_init, "probe",
        TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);


static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_NEEDGIANT,
        .d_open =       xptopen,
        .d_close =      xptclose,
        .d_ioctl =      xptioctl,
        .d_name =       "xpt",
};

static struct intr_config_hook *xpt_config_hook;

static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim = {
        .sim_action =   dead_sim_action,
        .sim_poll =     dead_sim_poll,
        .sim_name =     "dead_sim",
};

#define SIM_DEAD(sim)   ((sim) == &cam_dead_sim)

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef  CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

/* Pointers to software interrupt handlers */
static void *cambio_ih;

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
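
/*
 * A kernel config fragment satisfying the constraints above might look
 * like this (a sketch; the B/T/L values are arbitrary):
 *
 *	options 	CAMDEBUG
 *	options 	CAM_DEBUG_BUS=0
 *	options 	CAM_DEBUG_TARGET=1
 *	options 	CAM_DEBUG_LUN=0
 *	options 	CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_CDB)
 */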

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
        "cam",
        cam_module_event_handler,
        NULL
};

static void     xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static cam_status       xpt_compile_path(struct cam_path *new_path,
                                         struct cam_periph *perph,
                                         path_id_t path_id,
                                         target_id_t target_id,
                                         lun_id_t lun_id);

static void             xpt_release_path(struct cam_path *path);

static void             xpt_async_bcast(struct async_list *async_head,
                                        u_int32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static void             xpt_dev_async(u_int32_t async_code,
                                      struct cam_eb *bus,
                                      struct cam_et *target,
                                      struct cam_ed *device,
                                      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                                  u_int32_t new_priority);
static void      xpt_run_dev_allocq(struct cam_eb *bus);
static void      xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void      xpt_release_bus(struct cam_eb *bus);
static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
                                         int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
                 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
                                  lun_id_t lun_id);
static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
                                    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void      xpt_scan_lun(struct cam_periph *periph,
                              struct cam_path *path, cam_flags flags,
                              union ccb *ccb);
static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t    xptconfigbuscountfunc;
static xpt_busfunc_t    xptconfigfunc;
static void      xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static void      camisr(void *);
#if 0
static void      xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void      xptasync(struct cam_periph *periph,
                          u_int32_t code, cam_path *path);
#endif
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int              xpt_for_all_targets(xpt_targetfunc_t *tr_func,
                                            void *arg);
#endif
static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
                                            void *arg);
#ifdef notusedyet
static int              xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
                                            void *arg);
#endif
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);
static cam_status       proberegister(struct cam_periph *periph,
                                      void *arg);
static void      probeschedule(struct cam_periph *probe_periph);
static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
static void      proberequestdefaultnegotiation(struct cam_periph *periph);
static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
static void      probecleanup(struct cam_periph *periph);
static void      xpt_find_quirk(struct cam_ed *device);
#ifdef CAM_NEW_TRAN_CODE
static void      xpt_devise_transport(struct cam_path *path);
#endif /* CAM_NEW_TRAN_CODE */
static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
                                           struct cam_ed *device,
                                           int async_update);
static void      xpt_toggle_tags(struct cam_path *path);
static void      xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
                                            struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
                                           struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if (dev->ccbq.devq_openings > 0) {
                if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
                        cam_ccbq_resize(&dev->ccbq,
                                        dev->ccbq.dev_openings
                                        + dev->ccbq.dev_active);
                        dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
                }
                /*
                 * The priority of a device waiting for CCB resources
                 * is that of the highest priority peripheral driver
                 * enqueued.
                 */
                retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
                                          &dev->alloc_ccb_entry.pinfo,
                                          CAMQ_GET_HEAD(&dev->drvq)->priority);
        } else {
                retval = 0;
        }

        return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
        int     retval;

        if (dev->ccbq.dev_openings > 0) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&bus->sim->devq->send_queue,
                                     &dev->send_ccb_entry.pinfo,
                                     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
        return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
        return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
        return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
        /*
         * Have work to do.
         * Have space to do more work.
         * Allowed to do work.
         */
        return ((devq->alloc_queue.qfrozen_cnt == 0)
             && (devq->alloc_queue.entries > 0)
             && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
        make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
        /* Caller will release the CCB */
        wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * Only allow read-write access.
         */
        if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((flags & O_NONBLOCK) != 0) {
                printf("xpt%d: can't do nonblocking access\n", unit);
                return(ENODEV);
        }

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                printf("xptopen: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves open */
        xsoftc.flags |= XPT_FLAG_OPEN;

        return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                printf("xptclose: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves closed */
        xsoftc.flags &= ~XPT_FLAG_OPEN;

        return(0);
}

static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int unit, error;

        error = 0;
        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                printf("xptioctl: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        switch(cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
         * in the CAM spec.
         */
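#if 0
        /*
         * User-space sketch (not kernel code): one way to exercise this
         * ioctl is an XPT_PATH_INQ against /dev/xpt0.  Error handling is
         * trimmed and the bus number is arbitrary.
         */
        union ccb ccb;
        int fd;

        fd = open("/dev/xpt0", O_RDWR);
        bzero(&ccb, sizeof(ccb));
        ccb.ccb_h.func_code = XPT_PATH_INQ;
        ccb.ccb_h.path_id = 0;                  /* bus 0 */
        ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
        ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
        if (ioctl(fd, CAMIOCOMMAND, &ccb) == 0)
                printf("SIM: %s%u\n", ccb.cpi.dev_name, ccb.cpi.unit_number);
        close(fd);
#endif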
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;

                inccb = (union ccb *)addr;

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
                         || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
                                error = EINVAL;
                                break;
                        }
                        /* FALLTHROUGH */
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        ccb->ccb_h.cbfcnp = xptdone;
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        ccb.ccb_h.cbfcnp = xptdone;
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;

                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                break;
        }
        /*
         * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as
         * input, with the peripheral driver name and unit number filled in.
         * The other fields don't really matter as input.  The passthrough
         * driver name ("pass"), and unit number are passed back in the ccb.
         * The current device generation number, and the index into the device
         * peripheral driver list, and the status are also passed back.  Note
         * that since we do everything in one pass, unlike the XPT_GDEVLIST
         * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
         * (or rather should be) impossible for the device peripheral driver
         * list to change since we look at the whole thing in one pass, and
         * we do it with splcam protection.
         *
         */
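#if 0
        /*
         * User-space sketch (not kernel code): look up the pass(4)
         * instance that shadows da0.  Error handling is trimmed.
         */
        union ccb ccb;
        int fd;

        fd = open("/dev/xpt0", O_RDWR);
        bzero(&ccb, sizeof(ccb));
        ccb.ccb_h.func_code = XPT_GDEVLIST;
        strlcpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
        ccb.cgdl.unit_number = 0;
        if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
            ccb.cgdl.status == CAM_GDEVLIST_LAST_DEVICE)
                printf("%s%u\n", ccb.cgdl.periph_name, ccb.cgdl.unit_number);
        close(fd);
#endif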
 1230         case CAMGETPASSTHRU: {
 1231                 union ccb *ccb;
 1232                 struct cam_periph *periph;
 1233                 struct periph_driver **p_drv;
 1234                 char   *name;
 1235                 u_int unit;
 1236                 u_int cur_generation;
 1237                 int base_periph_found;
 1238                 int splbreaknum;
 1239                 int s;
 1240 
 1241                 ccb = (union ccb *)addr;
 1242                 unit = ccb->cgdl.unit_number;
 1243                 name = ccb->cgdl.periph_name;
 1244                 /*
 1245                  * Every 100 peripherals, we drop our spl protection to
 1246                  * give the software interrupt handler a chance to run.
 1247                  * Most systems have too few peripheral instances to
 1248                  * ever trip this check, but it avoids starving the
 1249                  * software interrupt handler on large systems.
 1250                  */
 1251                 splbreaknum = 100;
 1252 
 1255                 base_periph_found = 0;
 1256 
 1257                 /*
 1258                  * Sanity check -- make sure we don't get a null peripheral
 1259                  * driver name.
 1260                  */
 1261                 if (*ccb->cgdl.periph_name == '\0') {
 1262                         error = EINVAL;
 1263                         break;
 1264                 }
 1265 
 1266                 /* Keep the list from changing while we traverse it */
 1267                 s = splcam();
 1268 ptstartover:
 1269                 cur_generation = xsoftc.generation;
 1270 
 1271                 /* first find our driver in the list of drivers */
 1272                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
 1273                         if (strcmp((*p_drv)->driver_name, name) == 0)
 1274                                 break;
 1275 
 1276                 if (*p_drv == NULL) {
 1277                         splx(s);
 1278                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1279                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1280                         *ccb->cgdl.periph_name = '\0';
 1281                         ccb->cgdl.unit_number = 0;
 1282                         error = ENOENT;
 1283                         break;
 1284                 }       
 1285 
 1286                 /*
 1287                  * Run through every peripheral instance of this driver
 1288                  * and check to see whether it matches the unit passed
 1289                  * in by the user.  If it does, get out of the loops and
 1290                  * find the passthrough driver associated with that
 1291                  * peripheral driver.
 1292                  */
 1293                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
 1294                      periph = TAILQ_NEXT(periph, unit_links)) {
 1295 
 1296                         if (periph->unit_number == unit) {
 1297                                 break;
 1298                         } else if (--splbreaknum == 0) {
 1299                                 splx(s);
 1300                                 s = splcam();
 1301                                 splbreaknum = 100;
 1302                                 if (cur_generation != xsoftc.generation)
 1303                                        goto ptstartover;
 1304                         }
 1305                 }
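                /*
                 * Editor's note: the idiom above (drop the spl, reacquire it,
                 * and jump back to ptstartover if xsoftc.generation moved)
                 * periodically yields to the software interrupt handler
                 * without holding exclusion across the entire walk.
                 */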
 1306                 /*
 1307                  * If we found the peripheral instance that the user
 1308                  * passed in, go through all of the peripheral drivers
 1309                  * attached to that particular device and look for the
 1310                  * passthrough driver.
 1311                 if (periph != NULL) {
 1312                         struct cam_ed *device;
 1313                         int i;
 1314 
 1315                         base_periph_found = 1;
 1316                         device = periph->path->device;
 1317                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
 1318                              periph != NULL;
 1319                              periph = SLIST_NEXT(periph, periph_links), i++) {
 1320                                 /*
 1321                                  * Check to see whether we have a
 1322                                  * passthrough device or not. 
 1323                                  */
 1324                                 if (strcmp(periph->periph_name, "pass") == 0) {
 1325                                         /*
 1326                                          * Fill in the getdevlist fields.
 1327                                          */
 1328                                         strcpy(ccb->cgdl.periph_name,
 1329                                                periph->periph_name);
 1330                                         ccb->cgdl.unit_number =
 1331                                                 periph->unit_number;
 1332                                         if (SLIST_NEXT(periph, periph_links))
 1333                                                 ccb->cgdl.status =
 1334                                                         CAM_GDEVLIST_MORE_DEVS;
 1335                                         else
 1336                                                 ccb->cgdl.status =
 1337                                                        CAM_GDEVLIST_LAST_DEVICE;
 1338                                         ccb->cgdl.generation =
 1339                                                 device->generation;
 1340                                         ccb->cgdl.index = i;
 1341                                         /*
 1342                                          * Fill in some CCB header fields
 1343                                          * that the user may want.
 1344                                          */
 1345                                         ccb->ccb_h.path_id =
 1346                                                 periph->path->bus->path_id;
 1347                                         ccb->ccb_h.target_id =
 1348                                                 periph->path->target->target_id;
 1349                                         ccb->ccb_h.target_lun =
 1350                                                 periph->path->device->lun_id;
 1351                                         ccb->ccb_h.status = CAM_REQ_CMP;
 1352                                         break;
 1353                                 }
 1354                         }
 1355                 }
 1356 
 1357                 /*
 1358                  * If the periph is null here, one of two things has
 1359                  * happened.  The first possibility is that we couldn't
 1360                  * find the unit number of the particular peripheral driver
 1361                  * that the user is asking about.  e.g. the user asks for
 1362                  * the passthrough driver for "da11".  We find the list of
 1363                  * "da" peripherals all right, but there is no unit 11.
 1364                  * The other possibility is that we went through the list
 1365                  * of peripheral drivers attached to the device structure,
 1366                  * but didn't find one with the name "pass".  Either way,
 1367                  * we return ENOENT, since we couldn't find something.
 1368                  */
 1369                 if (periph == NULL) {
 1370                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1371                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1372                         *ccb->cgdl.periph_name = '\0';
 1373                         ccb->cgdl.unit_number = 0;
 1374                         error = ENOENT;
 1375                         /*
 1376                          * It is unfortunate that this is even necessary,
 1377                          * but there are many, many clueless users out there.
 1378                          * If base_periph_found is set, the user is looking
 1379                          * for the passthrough driver, but doesn't have one
 1380                          * compiled into his kernel.
 1381                          */
 1382                         if (base_periph_found == 1) {
 1383                                 printf("xptioctl: pass driver is not in the "
 1384                                        "kernel\n");
 1385                                 printf("xptioctl: put \"device pass0\" in "
 1386                                        "your kernel config file\n");
 1387                         }
 1388                 }
 1389                 splx(s);
 1390                 break;
 1391                 }
 1392         default:
 1393                 error = ENOTTY;
 1394                 break;
 1395         }
 1396 
 1397         return(error);
 1398 }
 1399 
 1400 static int
 1401 cam_module_event_handler(module_t mod, int what, void *arg)
 1402 {
 1403         if (what == MOD_LOAD) {
 1404                 xpt_init(NULL);
 1405         } else if (what == MOD_UNLOAD) {
 1406                 return EBUSY;
 1407         } else {
 1408                 return EOPNOTSUPP;
 1409         }
 1410 
 1411         return 0;
 1412 }
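      /*
       * For reference, a handler like this is normally wired up through the
       * standard kld machinery; a minimal sketch (an editor's illustration,
       * so the registration names may differ from what this file actually
       * uses elsewhere):
       *
       *	static moduledata_t cam_moduledata = {
       *		"cam",
       *		cam_module_event_handler,
       *		NULL
       *	};
       *	DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE,
       *	    SI_ORDER_SECOND);
       */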
 1413 
 1414 /* Functions accessed by the peripheral drivers */
 1415 static void
 1416 xpt_init(void *dummy)
 1417 {
 1419         struct cam_sim *xpt_sim;
 1420         struct cam_path *path;
 1421         struct cam_devq *devq;
 1422         cam_status status;
 1423 
 1424         TAILQ_INIT(&xpt_busses);
 1425         TAILQ_INIT(&cam_bioq);
 1426         SLIST_INIT(&ccb_freeq);
 1427         STAILQ_INIT(&highpowerq);
 1428 
 1429         mtx_init(&cam_bioq_lock, "CAM BIOQ lock", NULL, MTX_DEF);
 1430 
 1431         /*
 1432          * The xpt layer is, itself, the equivalent of a SIM.
 1433          * Allow 16 ccbs in the ccb pool for it.  This should
 1434          * give decent parallelism when we probe busses and
 1435          * perform other XPT functions.
 1436          */
 1437         devq = cam_simq_alloc(16);
 1438         xpt_sim = cam_sim_alloc(xptaction,
 1439                                 xptpoll,
 1440                                 "xpt",
 1441                                 /*softc*/NULL,
 1442                                 /*unit*/0,
 1443                                 /*max_dev_transactions*/0,
 1444                                 /*max_tagged_dev_transactions*/0,
 1445                                 devq);
 1446         xpt_max_ccbs = 16;
 1447                                 
 1448         xpt_bus_register(xpt_sim, /*bus #*/0);
 1449 
 1450         /*
 1451          * Looking at the XPT from the SIM layer, the XPT is
 1452          * the equivalent of a peripheral driver.  Allocate
 1453          * a peripheral driver entry for us.
 1454          */
 1455         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
 1456                                       CAM_TARGET_WILDCARD,
 1457                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
 1458                 printf("xpt_init: xpt_create_path failed with status %#x,"
 1459                        " failing attach\n", status);
 1460                 return;
 1461         }
 1462 
 1463         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
 1464                          path, NULL, 0, NULL);
 1465         xpt_free_path(path);
 1466 
 1467         xpt_sim->softc = xpt_periph;
 1468 
 1469         /*
 1470          * Register a callback for when interrupts are enabled.
 1471          */
 1472         xpt_config_hook =
 1473             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
 1474                                               M_TEMP, M_NOWAIT | M_ZERO);
 1475         if (xpt_config_hook == NULL) {
 1476                 printf("xpt_init: Cannot malloc config hook "
 1477                        "- failing attach\n");
 1478                 return;
 1479         }
 1480 
 1481         xpt_config_hook->ich_func = xpt_config;
 1482         if (config_intrhook_establish(xpt_config_hook) != 0) {
 1483                 free(xpt_config_hook, M_TEMP);
 1484                 printf("xpt_init: config_intrhook_establish failed "
 1485                        "- failing attach\n");
 1486         }
 1487 
 1488         /* Install our software interrupt handler */
 1489         swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih);
 1490 }
 1491 
 1492 static cam_status
 1493 xptregister(struct cam_periph *periph, void *arg)
 1494 {
 1495         if (periph == NULL) {
 1496                 printf("xptregister: periph was NULL!!\n");
 1497                 return(CAM_REQ_CMP_ERR);
 1498         }
 1499 
 1500         periph->softc = NULL;
 1501 
 1502         xpt_periph = periph;
 1503 
 1504         return(CAM_REQ_CMP);
 1505 }
 1506 
 1507 int32_t
 1508 xpt_add_periph(struct cam_periph *periph)
 1509 {
 1510         struct cam_ed *device;
 1511         int32_t  status;
 1512         struct periph_list *periph_head;
 1513 
 1514         GIANT_REQUIRED;
 1515 
 1516         device = periph->path->device;
 1517 
 1518         status = CAM_REQ_CMP;
 1519 
 1520         if (device != NULL) {
 1521                 int s;
 1522 
 1523                 periph_head = &device->periphs;
 1524 
 1525                 /*
 1526                  * Make room for this peripheral
 1527                  * so it will fit in the queue
 1528                  * when it's scheduled to run
 1529                  */
 1530                 s = splsoftcam();
 1531                 status = camq_resize(&device->drvq,
 1532                                      device->drvq.array_size + 1);
 1533 
 1534                 device->generation++;
 1535 
 1536                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
 1537 
 1538                 splx(s);
 1539         }
 1540 
 1541         xsoftc.generation++;
 1542 
 1543         return (status);
 1544 }
 1545 
 1546 void
 1547 xpt_remove_periph(struct cam_periph *periph)
 1548 {
 1549         struct cam_ed *device;
 1550 
 1551         GIANT_REQUIRED;
 1552 
 1553         device = periph->path->device;
 1554 
 1555         if (device != NULL) {
 1556                 int s;
 1557                 struct periph_list *periph_head;
 1558 
 1559                 periph_head = &device->periphs;
 1560                 
 1561                 /* Release the slot for this peripheral */
 1562                 s = splsoftcam();
 1563                 camq_resize(&device->drvq, device->drvq.array_size - 1);
 1564 
 1565                 device->generation++;
 1566 
 1567                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
 1568 
 1569                 splx(s);
 1570         }
 1571 
 1572         xsoftc.generation++;
 1573 
 1574 }
 1575 
 1576 #ifdef CAM_NEW_TRAN_CODE
 1577 
 1578 void
 1579 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1580 {
 1581         struct  ccb_pathinq cpi;
 1582         struct  ccb_trans_settings cts;
 1583         struct  cam_path *path;
 1584         u_int   speed;
 1585         u_int   freq;
 1586         u_int   mb;
 1587         int     s;
 1588 
 1589         GIANT_REQUIRED;
 1590 
 1591         path = periph->path;
 1592         /*
 1593          * To ensure that this is printed in one piece,
 1594          * mask out CAM interrupts.
 1595          */
 1596         s = splsoftcam();
 1597         printf("%s%d at %s%d bus %d target %d lun %d\n",
 1598                periph->periph_name, periph->unit_number,
 1599                path->bus->sim->sim_name,
 1600                path->bus->sim->unit_number,
 1601                path->bus->sim->bus_id,
 1602                path->target->target_id,
 1603                path->device->lun_id);
 1604         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1605         scsi_print_inquiry(&path->device->inq_data);
 1606         if (bootverbose && path->device->serial_num_len > 0) {
 1607                 /* Don't wrap the screen  - print only the first 60 chars */
 1608                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1609                        periph->unit_number, path->device->serial_num);
 1610         }
 1611         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 1612         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 1613         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 1614         xpt_action((union ccb*)&cts);
 1615 
 1616         /* Ask the SIM for its base transfer speed */
 1617         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 1618         cpi.ccb_h.func_code = XPT_PATH_INQ;
 1619         xpt_action((union ccb *)&cpi);
 1620 
 1621         speed = cpi.base_transfer_speed;
 1622         freq = 0;
 1623         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1624                 struct  ccb_trans_settings_spi *spi;
 1625 
 1626                 spi = &cts.xport_specific.spi;
 1627                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
 1628                   && spi->sync_offset != 0) {
 1629                         freq = scsi_calc_syncsrate(spi->sync_period);
 1630                         speed = freq;
 1631                 }
 1632 
 1633                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
 1634                         speed *= (0x01 << spi->bus_width);
 1635         }
 1636 
 1637         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1638                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
 1639                 if (fc->valid & CTS_FC_VALID_SPEED) {
 1640                         speed = fc->bitrate;
 1641                 }
 1642         }
 1643 
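        /*
         * Editor's note on units: scsi_calc_syncsrate() returns a rate in
         * kHz, so speed is carried in kB/s here.  For example, a 20MHz sync
         * rate (freq == 20000) on a 16-bit bus (bus_width == 1) yields
         * speed = 20000 << 1 = 40000, printed below as "40.000MB/s".
         */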
 1644         mb = speed / 1000;
 1645         if (mb > 0)
 1646                 printf("%s%d: %d.%03dMB/s transfers",
 1647                        periph->periph_name, periph->unit_number,
 1648                        mb, speed % 1000);
 1649         else
 1650                 printf("%s%d: %dKB/s transfers", periph->periph_name,
 1651                        periph->unit_number, speed);
 1652         /* Report additional information about SPI connections */
 1653         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1654                 struct  ccb_trans_settings_spi *spi;
 1655 
 1656                 spi = &cts.xport_specific.spi;
 1657                 if (freq != 0) {
 1658                         printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
 1659                                freq % 1000,
 1660                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
 1661                              ? " DT" : "",
 1662                                spi->sync_offset);
 1663                 }
 1664                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
 1665                  && spi->bus_width > 0) {
 1666                         if (freq != 0) {
 1667                                 printf(", ");
 1668                         } else {
 1669                                 printf(" (");
 1670                         }
 1671                         printf("%dbit)", 8 * (0x01 << spi->bus_width));
 1672                 } else if (freq != 0) {
 1673                         printf(")");
 1674                 }
 1675         }
 1676         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1677                 struct  ccb_trans_settings_fc *fc;
 1678 
 1679                 fc = &cts.xport_specific.fc;
 1680                 if (fc->valid & CTS_FC_VALID_WWNN)
 1681                         printf(" WWNN 0x%llx", (long long) fc->wwnn);
 1682                 if (fc->valid & CTS_FC_VALID_WWPN)
 1683                         printf(" WWPN 0x%llx", (long long) fc->wwpn);
 1684                 if (fc->valid & CTS_FC_VALID_PORT)
 1685                         printf(" PortID 0x%x", fc->port);
 1686         }
 1687 
 1688         if (path->device->inq_flags & SID_CmdQue
 1689          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1690                 printf("\n%s%d: Tagged Queueing Enabled",
 1691                        periph->periph_name, periph->unit_number);
 1692         }
 1693         printf("\n");
 1694 
 1695         /*
 1696          * We only want to print the caller's announce string if they've
 1697          * passed one in.
 1698          */
 1699         if (announce_string != NULL)
 1700                 printf("%s%d: %s\n", periph->periph_name,
 1701                        periph->unit_number, announce_string);
 1702         splx(s);
 1703 }
 1704 #else /* CAM_NEW_TRAN_CODE */
 1705 void
 1706 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1707 {
 1708         int s;
 1709         u_int mb;
 1710         struct cam_path *path;
 1711         struct ccb_trans_settings cts;
 1712 
 1713         GIANT_REQUIRED;
 1714 
 1715         path = periph->path;
 1716         /*
 1717          * To ensure that this is printed in one piece,
 1718          * mask out CAM interrupts.
 1719          */
 1720         s = splsoftcam();
 1721         printf("%s%d at %s%d bus %d target %d lun %d\n",
 1722                periph->periph_name, periph->unit_number,
 1723                path->bus->sim->sim_name,
 1724                path->bus->sim->unit_number,
 1725                path->bus->sim->bus_id,
 1726                path->target->target_id,
 1727                path->device->lun_id);
 1728         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1729         scsi_print_inquiry(&path->device->inq_data);
 1730         if ((bootverbose)
 1731          && (path->device->serial_num_len > 0)) {
 1732                 /* Don't wrap the screen  - print only the first 60 chars */
 1733                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1734                        periph->unit_number, path->device->serial_num);
 1735         }
 1736         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 1737         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 1738         cts.flags = CCB_TRANS_CURRENT_SETTINGS;
 1739         xpt_action((union ccb*)&cts);
 1740         if (cts.ccb_h.status == CAM_REQ_CMP) {
 1741                 u_int speed;
 1742                 u_int freq;
 1743 
 1744                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1745                   && cts.sync_offset != 0) {
 1746                         freq = scsi_calc_syncsrate(cts.sync_period);
 1747                         speed = freq;
 1748                 } else {
 1749                         struct ccb_pathinq cpi;
 1750 
 1751                         /* Ask the SIM for its base transfer speed */
 1752                         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 1753                         cpi.ccb_h.func_code = XPT_PATH_INQ;
 1754                         xpt_action((union ccb *)&cpi);
 1755 
 1756                         speed = cpi.base_transfer_speed;
 1757                         freq = 0;
 1758                 }
 1759                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
 1760                         speed *= (0x01 << cts.bus_width);
 1761                 mb = speed / 1000;
 1762                 if (mb > 0)
 1763                         printf("%s%d: %d.%03dMB/s transfers",
 1764                                periph->periph_name, periph->unit_number,
 1765                                mb, speed % 1000);
 1766                 else
 1767                         printf("%s%d: %dKB/s transfers", periph->periph_name,
 1768                                periph->unit_number, speed);
 1769                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1770                  && cts.sync_offset != 0) {
 1771                         printf(" (%d.%03dMHz, offset %d", freq / 1000,
 1772                                freq % 1000, cts.sync_offset);
 1773                 }
 1774                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
 1775                  && cts.bus_width > 0) {
 1776                         if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1777                          && cts.sync_offset != 0) {
 1778                                 printf(", ");
 1779                         } else {
 1780                                 printf(" (");
 1781                         }
 1782                         printf("%dbit)", 8 * (0x01 << cts.bus_width));
 1783                 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1784                         && cts.sync_offset != 0) {
 1785                         printf(")");
 1786                 }
 1787 
 1788                 if (path->device->inq_flags & SID_CmdQue
 1789                  || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1790                         printf(", Tagged Queueing Enabled");
 1791                 }
 1792 
 1793                 printf("\n");
 1794         } else if (path->device->inq_flags & SID_CmdQue
 1795                 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1796                 printf("%s%d: Tagged Queueing Enabled\n",
 1797                        periph->periph_name, periph->unit_number);
 1798         }
 1799 
 1800         /*
 1801          * We only want to print the caller's announce string if they've
 1802          * passed one in.
 1803          */
 1804         if (announce_string != NULL)
 1805                 printf("%s%d: %s\n", periph->periph_name,
 1806                        periph->unit_number, announce_string);
 1807         splx(s);
 1808 }
 1809 
 1810 #endif /* CAM_NEW_TRAN_CODE */
 1811 
 1812 static dev_match_ret
 1813 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1814             struct cam_eb *bus)
 1815 {
 1816         dev_match_ret retval;
 1817         int i;
 1818 
 1819         retval = DM_RET_NONE;
 1820 
 1821         /*
 1822          * If we aren't given something to match against, that's an error.
 1823          */
 1824         if (bus == NULL)
 1825                 return(DM_RET_ERROR);
 1826 
 1827         /*
 1828          * If there are no match entries, then this bus matches no
 1829          * matter what.
 1830          */
 1831         if ((patterns == NULL) || (num_patterns == 0))
 1832                 return(DM_RET_DESCEND | DM_RET_COPY);
 1833 
 1834         for (i = 0; i < num_patterns; i++) {
 1835                 struct bus_match_pattern *cur_pattern;
 1836 
 1837                 /*
 1838                  * If the pattern in question isn't for a bus node, we
 1839                  * aren't interested.  However, we do indicate to the
 1840                  * calling routine that we should continue descending the
 1841                  * tree, since the user wants to match against lower-level
 1842                  * EDT elements.
 1843                  */
 1844                 if (patterns[i].type != DEV_MATCH_BUS) {
 1845                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1846                                 retval |= DM_RET_DESCEND;
 1847                         continue;
 1848                 }
 1849 
 1850                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1851 
 1852                 /*
 1853                  * If they want to match any bus node, we give them
 1854                  * this bus node.
 1855                  */
 1856                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1857                         /* set the copy flag */
 1858                         retval |= DM_RET_COPY;
 1859 
 1860                         /*
 1861                          * If we've already decided on an action, go ahead
 1862                          * and return.
 1863                          */
 1864                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1865                                 return(retval);
 1866                 }
 1867 
 1868                 /*
 1869                  * Not sure why someone would do this...
 1870                  */
 1871                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1872                         continue;
 1873 
 1874                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1875                  && (cur_pattern->path_id != bus->path_id))
 1876                         continue;
 1877 
 1878                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1879                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1880                         continue;
 1881 
 1882                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1883                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1884                         continue;
 1885 
 1886                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1887                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1888                              DEV_IDLEN) != 0))
 1889                         continue;
 1890 
 1891                 /*
 1892                  * If we get to this point, the user definitely wants 
 1893                  * information on this bus.  So tell the caller to copy the
 1894                  * data out.
 1895                  */
 1896                 retval |= DM_RET_COPY;
 1897 
 1898                 /*
 1899                  * If the return action has been set to descend, then we
 1900                  * know that we've already seen a non-bus matching
 1901                  * expression, therefore we need to further descend the tree.
 1902                  * This won't change by continuing around the loop, so we
 1903                  * go ahead and return.  If we haven't seen a non-bus
 1904                  * matching expression, we keep going around the loop until
 1905                  * we exhaust the matching expressions.  We'll set the stop
 1906                  * flag once we fall out of the loop.
 1907                  */
 1908                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1909                         return(retval);
 1910         }
 1911 
 1912         /*
 1913          * If the return action hasn't been set to descend yet, that means
 1914          * we haven't seen anything other than bus matching patterns.  So
 1915          * tell the caller to stop descending the tree -- the user doesn't
 1916          * want to match against lower level tree elements.
 1917          */
 1918         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1919                 retval |= DM_RET_STOP;
 1920 
 1921         return(retval);
 1922 }
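      /*
       * Example (an editor's sketch): to match only busses driven by a SIM
       * named "ahc", a caller could build a pattern like the following and
       * hand it to an XPT_DEV_MATCH CCB; BUS_MATCH_ANY instead would copy
       * out every bus, as handled above.
       *
       *	struct dev_match_pattern pat;
       *
       *	bzero(&pat, sizeof(pat));
       *	pat.type = DEV_MATCH_BUS;
       *	pat.pattern.bus_pattern.flags = BUS_MATCH_NAME;
       *	strncpy(pat.pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
       */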
 1923 
 1924 static dev_match_ret
 1925 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1926                struct cam_ed *device)
 1927 {
 1928         dev_match_ret retval;
 1929         int i;
 1930 
 1931         retval = DM_RET_NONE;
 1932 
 1933         /*
 1934          * If we aren't given something to match against, that's an error.
 1935          */
 1936         if (device == NULL)
 1937                 return(DM_RET_ERROR);
 1938 
 1939         /*
 1940          * If there are no match entries, then this device matches no
 1941          * matter what.
 1942          */
 1943         if ((patterns == NULL) || (num_patterns == 0))
 1944                 return(DM_RET_DESCEND | DM_RET_COPY);
 1945 
 1946         for (i = 0; i < num_patterns; i++) {
 1947                 struct device_match_pattern *cur_pattern;
 1948 
 1949                 /*
 1950                  * If the pattern in question isn't for a device node, we
 1951                  * aren't interested.
 1952                  */
 1953                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1954                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1955                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1956                                 retval |= DM_RET_DESCEND;
 1957                         continue;
 1958                 }
 1959 
 1960                 cur_pattern = &patterns[i].pattern.device_pattern;
 1961 
 1962                 /*
 1963                  * If they want to match any device node, we give them any
 1964                  * device node.
 1965                  */
 1966                 if (cur_pattern->flags == DEV_MATCH_ANY) {
 1967                         /* set the copy flag */
 1968                         retval |= DM_RET_COPY;
 1969 
 1971                         /*
 1972                          * If we've already decided on an action, go ahead
 1973                          * and return.
 1974                          */
 1975                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1976                                 return(retval);
 1977                 }
 1978 
 1979                 /*
 1980                  * Not sure why someone would do this...
 1981                  */
 1982                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1983                         continue;
 1984 
 1985                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1986                  && (cur_pattern->path_id != device->target->bus->path_id))
 1987                         continue;
 1988 
 1989                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1990                  && (cur_pattern->target_id != device->target->target_id))
 1991                         continue;
 1992 
 1993                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1994                  && (cur_pattern->target_lun != device->lun_id))
 1995                         continue;
 1996 
 1997                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1998                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1999                                     (caddr_t)&cur_pattern->inq_pat,
 2000                                     1, sizeof(cur_pattern->inq_pat),
 2001                                     scsi_static_inquiry_match) == NULL))
 2002                         continue;
 2003 
 2004                 /*
 2005                  * If we get to this point, the user definitely wants 
 2006                  * information on this device.  So tell the caller to copy
 2007                  * the data out.
 2008                  */
 2009                 retval |= DM_RET_COPY;
 2010 
 2011                 /*
 2012                  * If the return action has been set to descend, then we
 2013                  * know that we've already seen a peripheral matching
 2014                  * expression, therefore we need to further descend the tree.
 2015                  * This won't change by continuing around the loop, so we
 2016                  * go ahead and return.  If we haven't seen a peripheral
 2017                  * matching expression, we keep going around the loop until
 2018                  * we exhaust the matching expressions.  We'll set the stop
 2019                  * flag once we fall out of the loop.
 2020                  */
 2021                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 2022                         return(retval);
 2023         }
 2024 
 2025         /*
 2026          * If the return action hasn't been set to descend yet, that means
 2027          * we haven't seen any peripheral matching patterns.  So tell the
 2028          * caller to stop descending the tree -- the user doesn't want to
 2029          * match against lower level tree elements.
 2030          */
 2031         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 2032                 retval |= DM_RET_STOP;
 2033 
 2034         return(retval);
 2035 }
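      /*
       * Example (an editor's sketch, reusing the pat variable from the bus
       * example above): matching every device at target 3, LUN 0 would use
       * the flags and fields checked in the function above:
       *
       *	pat.type = DEV_MATCH_DEVICE;
       *	pat.pattern.device_pattern.flags =
       *	    DEV_MATCH_TARGET | DEV_MATCH_LUN;
       *	pat.pattern.device_pattern.target_id = 3;
       *	pat.pattern.device_pattern.target_lun = 0;
       *
       * DEV_MATCH_INQUIRY instead compares the pattern's inq_pat against
       * the device's cached inquiry data via scsi_static_inquiry_match().
       */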
 2036 
 2037 /*
 2038  * Match a single peripheral against any number of match patterns.
 2039  */
 2040 static dev_match_ret
 2041 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 2042                struct cam_periph *periph)
 2043 {
 2044         dev_match_ret retval;
 2045         int i;
 2046 
 2047         /*
 2048          * If we aren't given something to match against, that's an error.
 2049          */
 2050         if (periph == NULL)
 2051                 return(DM_RET_ERROR);
 2052 
 2053         /*
 2054          * If there are no match entries, then this peripheral matches no
 2055          * matter what.
 2056          */
 2057         if ((patterns == NULL) || (num_patterns == 0))
 2058                 return(DM_RET_STOP | DM_RET_COPY);
 2059 
 2060         /*
 2061          * There aren't any nodes below a peripheral node, so there's no
 2062          * reason to descend the tree any further.
 2063          */
 2064         retval = DM_RET_STOP;
 2065 
 2066         for (i = 0; i < num_patterns; i++) {
 2067                 struct periph_match_pattern *cur_pattern;
 2068 
 2069                 /*
 2070                  * If the pattern in question isn't for a peripheral, we
 2071                  * aren't interested.
 2072                  */
 2073                 if (patterns[i].type != DEV_MATCH_PERIPH)
 2074                         continue;
 2075 
 2076                 cur_pattern = &patterns[i].pattern.periph_pattern;
 2077 
 2078                 /*
 2079                  * If they want to match on anything, then we will do so.
 2080                  */
 2081                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 2082                         /* set the copy flag */
 2083                         retval |= DM_RET_COPY;
 2084 
 2085                         /*
 2086                          * We've already set the return action to stop,
 2087                          * since there are no nodes below peripherals in
 2088                          * the tree.
 2089                          */
 2090                         return(retval);
 2091                 }
 2092 
 2093                 /*
 2094                  * Not sure why someone would do this...
 2095                  */
 2096                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 2097                         continue;
 2098 
 2099                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 2100                  && (cur_pattern->path_id != periph->path->bus->path_id))
 2101                         continue;
 2102 
 2103                 /*
 2104                  * For the target and lun IDs, we have to make sure the
 2105                  * target and lun pointers aren't NULL.  The xpt peripheral
 2106                  * has a wildcard target and device.
 2107                  */
 2108                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 2109                  && ((periph->path->target == NULL)
 2110                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 2111                         continue;
 2112 
 2113                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 2114                  && ((periph->path->device == NULL)
 2115                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 2116                         continue;
 2117 
 2118                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 2119                  && (cur_pattern->unit_number != periph->unit_number))
 2120                         continue;
 2121 
 2122                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 2123                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 2124                              DEV_IDLEN) != 0))
 2125                         continue;
 2126 
 2127                 /*
 2128                  * If we get to this point, the user definitely wants 
 2129                  * information on this peripheral.  So tell the caller to
 2130                  * copy the data out.
 2131                  */
 2132                 retval |= DM_RET_COPY;
 2133 
 2134                 /*
 2135                  * The return action has already been set to stop, since
 2136                  * peripherals don't have any nodes below them in the EDT.
 2137                  */
 2138                 return(retval);
 2139         }
 2140 
 2141         /*
 2142          * If we get to this point, the peripheral that was passed in
 2143          * doesn't match any of the patterns.
 2144          */
 2145         return(retval);
 2146 }
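      /*
       * Example (an editor's sketch): to locate all "pass" peripheral
       * instances, a caller could use
       *
       *	pat.type = DEV_MATCH_PERIPH;
       *	pat.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
       *	strncpy(pat.pattern.periph_pattern.periph_name, "pass",
       *	    DEV_IDLEN);
       *
       * Adding PERIPH_MATCH_UNIT with unit_number set narrows the match to
       * a single instance.
       */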
 2147 
 2148 static int
 2149 xptedtbusfunc(struct cam_eb *bus, void *arg)
 2150 {
 2151         struct ccb_dev_match *cdm;
 2152         dev_match_ret retval;
 2153 
 2154         cdm = (struct ccb_dev_match *)arg;
 2155 
 2156         /*
 2157          * If our position is for something deeper in the tree, that means
 2158          * that we've already seen this node.  So, we keep going down.
 2159          */
 2160         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2161          && (cdm->pos.cookie.bus == bus)
 2162          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2163          && (cdm->pos.cookie.target != NULL))
 2164                 retval = DM_RET_DESCEND;
 2165         else
 2166                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 2167 
 2168         /*
 2169          * If we got an error, bail out of the search.
 2170          */
 2171         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2172                 cdm->status = CAM_DEV_MATCH_ERROR;
 2173                 return(0);
 2174         }
 2175 
 2176         /*
 2177          * If the copy flag is set, copy this bus out.
 2178          */
 2179         if (retval & DM_RET_COPY) {
 2180                 int spaceleft, j;
 2181 
 2182                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2183                         sizeof(struct dev_match_result));
 2184 
 2185                 /*
 2186                  * If we don't have enough space to put in another
 2187                  * match result, save our position and tell the
 2188                  * user there are more devices to check.
 2189                  */
 2190                 if (spaceleft < sizeof(struct dev_match_result)) {
 2191                         bzero(&cdm->pos, sizeof(cdm->pos));
 2192                         cdm->pos.position_type = 
 2193                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 2194 
 2195                         cdm->pos.cookie.bus = bus;
 2196                         cdm->pos.generations[CAM_BUS_GENERATION]=
 2197                                 bus_generation;
 2198                         cdm->status = CAM_DEV_MATCH_MORE;
 2199                         return(0);
 2200                 }
 2201                 j = cdm->num_matches;
 2202                 cdm->num_matches++;
 2203                 cdm->matches[j].type = DEV_MATCH_BUS;
 2204                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 2205                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 2206                 cdm->matches[j].result.bus_result.unit_number =
 2207                         bus->sim->unit_number;
 2208                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 2209                         bus->sim->sim_name, DEV_IDLEN);
 2210         }
 2211 
 2212         /*
 2213          * If the user is only interested in busses, there's no
 2214          * reason to descend to the next level in the tree.
 2215          */
 2216         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2217                 return(1);
 2218 
 2219         /*
 2220          * If there is a target generation recorded, check it to
 2221          * make sure the target list hasn't changed.
 2222          */
 2223         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2224          && (bus == cdm->pos.cookie.bus)
 2225          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2226          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 2227          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 2228              bus->generation)) {
 2229                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2230                 return(0);
 2231         }
 2232 
 2233         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2234          && (cdm->pos.cookie.bus == bus)
 2235          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2236          && (cdm->pos.cookie.target != NULL))
 2237                 return(xpttargettraverse(bus,
 2238                                         (struct cam_et *)cdm->pos.cookie.target,
 2239                                          xptedttargetfunc, arg));
 2240         else
 2241                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 2242 }
 2243 
 2244 static int
 2245 xptedttargetfunc(struct cam_et *target, void *arg)
 2246 {
 2247         struct ccb_dev_match *cdm;
 2248 
 2249         cdm = (struct ccb_dev_match *)arg;
 2250 
 2251         /*
 2252          * If there is a device list generation recorded, check it to
 2253          * make sure the device list hasn't changed.
 2254          */
 2255         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2256          && (cdm->pos.cookie.bus == target->bus)
 2257          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2258          && (cdm->pos.cookie.target == target)
 2259          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2260          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 2261          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 2262              target->generation)) {
 2263                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2264                 return(0);
 2265         }
 2266 
 2267         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2268          && (cdm->pos.cookie.bus == target->bus)
 2269          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2270          && (cdm->pos.cookie.target == target)
 2271          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2272          && (cdm->pos.cookie.device != NULL))
 2273                 return(xptdevicetraverse(target,
 2274                                         (struct cam_ed *)cdm->pos.cookie.device,
 2275                                          xptedtdevicefunc, arg));
 2276         else
 2277                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 2278 }
 2279 
 2280 static int
 2281 xptedtdevicefunc(struct cam_ed *device, void *arg)
 2282 {
 2283 
 2284         struct ccb_dev_match *cdm;
 2285         dev_match_ret retval;
 2286 
 2287         cdm = (struct ccb_dev_match *)arg;
 2288 
 2289         /*
 2290          * If our position is for something deeper in the tree, that means
 2291          * that we've already seen this node.  So, we keep going down.
 2292          */
 2293         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2294          && (cdm->pos.cookie.device == device)
 2295          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2296          && (cdm->pos.cookie.periph != NULL))
 2297                 retval = DM_RET_DESCEND;
 2298         else
 2299                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 2300                                         device);
 2301 
 2302         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2303                 cdm->status = CAM_DEV_MATCH_ERROR;
 2304                 return(0);
 2305         }
 2306 
 2307         /*
 2308          * If the copy flag is set, copy this device out.
 2309          */
 2310         if (retval & DM_RET_COPY) {
 2311                 int spaceleft, j;
 2312 
 2313                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2314                         sizeof(struct dev_match_result));
 2315 
 2316                 /*
 2317                  * If we don't have enough space to put in another
 2318                  * match result, save our position and tell the
 2319                  * user there are more devices to check.
 2320                  */
 2321                 if (spaceleft < sizeof(struct dev_match_result)) {
 2322                         bzero(&cdm->pos, sizeof(cdm->pos));
 2323                         cdm->pos.position_type = 
 2324                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2325                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 2326 
 2327                         cdm->pos.cookie.bus = device->target->bus;
 2328                         cdm->pos.generations[CAM_BUS_GENERATION]=
 2329                                 bus_generation;
 2330                         cdm->pos.cookie.target = device->target;
 2331                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2332                                 device->target->bus->generation;
 2333                         cdm->pos.cookie.device = device;
 2334                         cdm->pos.generations[CAM_DEV_GENERATION] = 
 2335                                 device->target->generation;
 2336                         cdm->status = CAM_DEV_MATCH_MORE;
 2337                         return(0);
 2338                 }
 2339                 j = cdm->num_matches;
 2340                 cdm->num_matches++;
 2341                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 2342                 cdm->matches[j].result.device_result.path_id =
 2343                         device->target->bus->path_id;
 2344                 cdm->matches[j].result.device_result.target_id =
 2345                         device->target->target_id;
 2346                 cdm->matches[j].result.device_result.target_lun =
 2347                         device->lun_id;
 2348                 bcopy(&device->inq_data,
 2349                       &cdm->matches[j].result.device_result.inq_data,
 2350                       sizeof(struct scsi_inquiry_data));
 2351 
 2352                 /* Let the user know whether this device is unconfigured */
 2353                 if (device->flags & CAM_DEV_UNCONFIGURED)
 2354                         cdm->matches[j].result.device_result.flags =
 2355                                 DEV_RESULT_UNCONFIGURED;
 2356                 else
 2357                         cdm->matches[j].result.device_result.flags =
 2358                                 DEV_RESULT_NOFLAG;
 2359         }
 2360 
 2361         /*
 2362          * If the user isn't interested in peripherals, don't descend
 2363          * the tree any further.
 2364          */
 2365         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2366                 return(1);
 2367 
 2368         /*
 2369          * If there is a peripheral list generation recorded, make sure
 2370          * it hasn't changed.
 2371          */
 2372         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2373          && (device->target->bus == cdm->pos.cookie.bus)
 2374          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2375          && (device->target == cdm->pos.cookie.target)
 2376          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2377          && (device == cdm->pos.cookie.device)
 2378          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2379          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2380          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2381              device->generation)){
 2382                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2383                 return(0);
 2384         }
 2385 
 2386         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2387          && (cdm->pos.cookie.bus == device->target->bus)
 2388          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2389          && (cdm->pos.cookie.target == device->target)
 2390          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2391          && (cdm->pos.cookie.device == device)
 2392          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2393          && (cdm->pos.cookie.periph != NULL))
 2394                 return(xptperiphtraverse(device,
 2395                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2396                                 xptedtperiphfunc, arg));
 2397         else
 2398                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 2399 }
 2400 
 2401 static int
 2402 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 2403 {
 2404         struct ccb_dev_match *cdm;
 2405         dev_match_ret retval;
 2406 
 2407         cdm = (struct ccb_dev_match *)arg;
 2408 
 2409         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2410 
 2411         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2412                 cdm->status = CAM_DEV_MATCH_ERROR;
 2413                 return(0);
 2414         }
 2415 
 2416         /*
 2417          * If the copy flag is set, copy this peripheral out.
 2418          */
 2419         if (retval & DM_RET_COPY) {
 2420                 int spaceleft, j;
 2421 
 2422                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2423                         sizeof(struct dev_match_result));
 2424 
 2425                 /*
 2426                  * If we don't have enough space to put in another
 2427                  * match result, save our position and tell the
 2428                  * user there are more devices to check.
 2429                  */
 2430                 if (spaceleft < sizeof(struct dev_match_result)) {
 2431                         bzero(&cdm->pos, sizeof(cdm->pos));
 2432                         cdm->pos.position_type = 
 2433                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2434                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 2435                                 CAM_DEV_POS_PERIPH;
 2436 
 2437                         cdm->pos.cookie.bus = periph->path->bus;
 2438                         cdm->pos.generations[CAM_BUS_GENERATION]=
 2439                                 bus_generation;
 2440                         cdm->pos.cookie.target = periph->path->target;
 2441                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2442                                 periph->path->bus->generation;
 2443                         cdm->pos.cookie.device = periph->path->device;
 2444                         cdm->pos.generations[CAM_DEV_GENERATION] = 
 2445                                 periph->path->target->generation;
 2446                         cdm->pos.cookie.periph = periph;
 2447                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2448                                 periph->path->device->generation;
 2449                         cdm->status = CAM_DEV_MATCH_MORE;
 2450                         return(0);
 2451                 }
 2452 
 2453                 j = cdm->num_matches;
 2454                 cdm->num_matches++;
 2455                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2456                 cdm->matches[j].result.periph_result.path_id =
 2457                         periph->path->bus->path_id;
 2458                 cdm->matches[j].result.periph_result.target_id =
 2459                         periph->path->target->target_id;
 2460                 cdm->matches[j].result.periph_result.target_lun =
 2461                         periph->path->device->lun_id;
 2462                 cdm->matches[j].result.periph_result.unit_number =
 2463                         periph->unit_number;
 2464                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2465                         periph->periph_name, DEV_IDLEN);
 2466         }
 2467 
 2468         return(1);
 2469 }
 2470 
 2471 static int
 2472 xptedtmatch(struct ccb_dev_match *cdm)
 2473 {
 2474         int ret;
 2475 
 2476         cdm->num_matches = 0;
 2477 
 2478         /*
 2479          * Check the bus list generation.  If it has changed, the user
 2480          * needs to reset everything and start over.
 2481          */
 2482         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2483          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 2484          && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
 2485                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2486                 return(0);
 2487         }
 2488 
 2489         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2490          && (cdm->pos.cookie.bus != NULL))
 2491                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 2492                                      xptedtbusfunc, cdm);
 2493         else
 2494                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 2495 
 2496         /*
 2497          * If we get back 0, that means that we had to stop before fully
 2498          * traversing the EDT.  It also means that one of the subroutines
 2499          * has set the status field to the proper value.  If we get back 1,
 2500          * we've fully traversed the EDT and copied out any matching entries.
 2501          */
 2502         if (ret == 1)
 2503                 cdm->status = CAM_DEV_MATCH_LAST;
 2504 
 2505         return(ret);
 2506 }
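      /*
       * Illustrative consumer loop (an editor's sketch): since a traversal
       * may not fit in match_buf_len, callers typically resubmit the same
       * CCB until the last chunk arrives, restarting from the top (and
       * discarding anything gathered so far) if the EDT changed underneath
       * them:
       *
       *	for (;;) {
       *		xpt_action((union ccb *)cdm);
       *		(consume cdm->matches[0 .. cdm->num_matches - 1])
       *		if (cdm->status == CAM_DEV_MATCH_LIST_CHANGED) {
       *			bzero(&cdm->pos, sizeof(cdm->pos));
       *			continue;
       *		}
       *		if (cdm->status != CAM_DEV_MATCH_MORE)
       *			break;
       *	}
       *
       * The saved position in cdm->pos is what the functions above consult
       * to resume the walk.
       */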
 2507 
 2508 static int
 2509 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 2510 {
 2511         struct ccb_dev_match *cdm;
 2512 
 2513         cdm = (struct ccb_dev_match *)arg;
 2514 
 2515         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2516          && (cdm->pos.cookie.pdrv == pdrv)
 2517          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2518          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2519          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2520              (*pdrv)->generation)) {
 2521                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2522                 return(0);
 2523         }
 2524 
 2525         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2526          && (cdm->pos.cookie.pdrv == pdrv)
 2527          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2528          && (cdm->pos.cookie.periph != NULL))
 2529                 return(xptpdperiphtraverse(pdrv,
 2530                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2531                                 xptplistperiphfunc, arg));
 2532         else
 2533                 return(xptpdperiphtraverse(pdrv, NULL,
                                 xptplistperiphfunc, arg));
 2534 }
 2535 
 2536 static int
 2537 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 2538 {
 2539         struct ccb_dev_match *cdm;
 2540         dev_match_ret retval;
 2541 
 2542         cdm = (struct ccb_dev_match *)arg;
 2543 
 2544         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2545 
 2546         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2547                 cdm->status = CAM_DEV_MATCH_ERROR;
 2548                 return(0);
 2549         }
 2550 
 2551         /*
 2552          * If the copy flag is set, copy this peripheral out.
 2553          */
 2554         if (retval & DM_RET_COPY) {
 2555                 int spaceleft, j;
 2556 
 2557                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2558                         sizeof(struct dev_match_result));
 2559 
 2560                 /*
 2561                  * If we don't have enough space to put in another
 2562                  * match result, save our position and tell the
 2563                  * user there are more devices to check.
 2564                  */
 2565                 if (spaceleft < sizeof(struct dev_match_result)) {
 2566                         struct periph_driver **pdrv;
 2567 
 2568                         pdrv = NULL;
 2569                         bzero(&cdm->pos, sizeof(cdm->pos));
 2570                         cdm->pos.position_type = 
 2571                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 2572                                 CAM_DEV_POS_PERIPH;
 2573 
 2574                         /*
 2575                          * This may look a bit nonsensical, but it is
 2576                          * actually quite logical.  There are very few
 2577                          * peripheral drivers, and bloating every peripheral
 2578                          * structure with a pointer back to its parent
 2579                          * peripheral driver linker set entry would cost
 2580                          * more in the long run than doing this quick lookup.
 2581                          */
 2582                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 2583                                 if (strcmp((*pdrv)->driver_name,
 2584                                     periph->periph_name) == 0)
 2585                                         break;
 2586                         }
 2587 
 2588                         if (*pdrv == NULL) {
 2589                                 cdm->status = CAM_DEV_MATCH_ERROR;
 2590                                 return(0);
 2591                         }
 2592 
 2593                         cdm->pos.cookie.pdrv = pdrv;
 2594                         /*
 2595                          * The periph generation slot does double duty, as
 2596                          * does the periph pointer slot.  They are used for
 2597                          * both edt and pdrv lookups and positioning.
 2598                          */
 2599                         cdm->pos.cookie.periph = periph;
 2600                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2601                                 (*pdrv)->generation;
 2602                         cdm->status = CAM_DEV_MATCH_MORE;
 2603                         return(0);
 2604                 }
 2605 
 2606                 j = cdm->num_matches;
 2607                 cdm->num_matches++;
 2608                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2609                 cdm->matches[j].result.periph_result.path_id =
 2610                         periph->path->bus->path_id;
 2611 
 2612                 /*
 2613                  * The transport layer peripheral doesn't have a target or
 2614                  * lun.
 2615                  */
 2616                 if (periph->path->target)
 2617                         cdm->matches[j].result.periph_result.target_id =
 2618                                 periph->path->target->target_id;
 2619                 else
 2620                         cdm->matches[j].result.periph_result.target_id = -1;
 2621 
 2622                 if (periph->path->device)
 2623                         cdm->matches[j].result.periph_result.target_lun =
 2624                                 periph->path->device->lun_id;
 2625                 else
 2626                         cdm->matches[j].result.periph_result.target_lun = -1;
 2627 
 2628                 cdm->matches[j].result.periph_result.unit_number =
 2629                         periph->unit_number;
 2630                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2631                         periph->periph_name, DEV_IDLEN);
 2632         }
 2633 
 2634         return(1);
 2635 }
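
/*
 * The linker set lookup from the block above, pulled out as a sketch.
 * The helper name is hypothetical; periph_drivers is the NULL-terminated
 * linker set the comment refers to.
 */
#if 0
static struct periph_driver **
xptfindpdrv(const char *name)
{
        struct periph_driver **pdrv;

        for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
                if (strcmp((*pdrv)->driver_name, name) == 0)
                        return(pdrv);
        }
        return(NULL);
}
#endif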
 2636 
 2637 static int
 2638 xptperiphlistmatch(struct ccb_dev_match *cdm)
 2639 {
 2640         int ret;
 2641 
 2642         cdm->num_matches = 0;
 2643 
 2644         /*
 2645          * At the corresponding point in the EDT traversal function, we
 2646          * check the bus list generation to make sure that no busses have
 2647          * been added or removed since the user last sent an XPT_DEV_MATCH
 2648          * ccb through.  For this peripheral driver list traversal, however,
 2649          * we don't have to worry about new peripheral driver types coming or
 2650          * going; they're in a linker set, and therefore can't change
 2651          * without a recompile.
 2652          */
 2653 
 2654         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2655          && (cdm->pos.cookie.pdrv != NULL))
 2656                 ret = xptpdrvtraverse(
 2657                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2658                                 xptplistpdrvfunc, cdm);
 2659         else
 2660                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2661 
 2662         /*
 2663          * If we get back 0, that means that we had to stop before fully
 2664          * traversing the peripheral driver tree.  It also means that one of
 2665          * the subroutines has set the status field to the proper value.  If
 2666          * we get back 1, we've fully traversed the peripheral driver lists
 2667          * and copied out any matching entries.
 2668          */
 2669         if (ret == 1)
 2670                 cdm->status = CAM_DEV_MATCH_LAST;
 2671 
 2672         return(ret);
 2673 }
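
/*
 * Rough userland sketch of the CAM_DEV_MATCH_MORE/CAM_DEV_MATCH_LAST
 * protocol implemented by the two match entry points above; assumes a
 * CAMIOCOMMAND ioctl on /dev/xpt0 with the pattern and match buffers
 * already hung off ccb.cdm.  Setup and error handling are abbreviated.
 */
#if 0
        union ccb ccb;
        int fd;

        fd = open("/dev/xpt0", O_RDWR);
        /* ... zero ccb, fill in ccb.cdm patterns and match buffer ... */
        ccb.ccb_h.func_code = XPT_DEV_MATCH;
        do {
                if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
                        err(1, "CAMIOCOMMAND");
                /*
                 * Consume ccb.cdm.num_matches entries from
                 * ccb.cdm.matches.  The kernel saved ccb.cdm.pos, so
                 * the next pass resumes where this one stopped.
                 */
        } while ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
              && ccb.cdm.status == CAM_DEV_MATCH_MORE);
#endif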
 2674 
 2675 static int
 2676 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2677 {
 2678         struct cam_eb *bus, *next_bus;
 2679         int retval;
 2680 
 2681         retval = 1;
 2682 
 2683         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
 2684              bus != NULL;
 2685              bus = next_bus) {
 2686                 next_bus = TAILQ_NEXT(bus, links);
 2687 
 2688                 retval = tr_func(bus, arg);
 2689                 if (retval == 0)
 2690                         return(retval);
 2691         }
 2692 
 2693         return(retval);
 2694 }
 2695 
 2696 static int
 2697 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2698                   xpt_targetfunc_t *tr_func, void *arg)
 2699 {
 2700         struct cam_et *target, *next_target;
 2701         int retval;
 2702 
 2703         retval = 1;
 2704         for (target = (start_target ? start_target :
 2705                        TAILQ_FIRST(&bus->et_entries));
 2706              target != NULL; target = next_target) {
 2707 
 2708                 next_target = TAILQ_NEXT(target, links);
 2709 
 2710                 retval = tr_func(target, arg);
 2711 
 2712                 if (retval == 0)
 2713                         return(retval);
 2714         }
 2715 
 2716         return(retval);
 2717 }
 2718 
 2719 static int
 2720 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2721                   xpt_devicefunc_t *tr_func, void *arg)
 2722 {
 2723         struct cam_ed *device, *next_device;
 2724         int retval;
 2725 
 2726         retval = 1;
 2727         for (device = (start_device ? start_device :
 2728                        TAILQ_FIRST(&target->ed_entries));
 2729              device != NULL;
 2730              device = next_device) {
 2731 
 2732                 next_device = TAILQ_NEXT(device, links);
 2733 
 2734                 retval = tr_func(device, arg);
 2735 
 2736                 if (retval == 0)
 2737                         return(retval);
 2738         }
 2739 
 2740         return(retval);
 2741 }
 2742 
 2743 static int
 2744 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2745                   xpt_periphfunc_t *tr_func, void *arg)
 2746 {
 2747         struct cam_periph *periph, *next_periph;
 2748         int retval;
 2749 
 2750         retval = 1;
 2751 
 2752         for (periph = (start_periph ? start_periph :
 2753                        SLIST_FIRST(&device->periphs));
 2754              periph != NULL;
 2755              periph = next_periph) {
 2756 
 2757                 next_periph = SLIST_NEXT(periph, periph_links);
 2758 
 2759                 retval = tr_func(periph, arg);
 2760                 if (retval == 0)
 2761                         return(retval);
 2762         }
 2763 
 2764         return(retval);
 2765 }
 2766 
 2767 static int
 2768 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2769                 xpt_pdrvfunc_t *tr_func, void *arg)
 2770 {
 2771         struct periph_driver **pdrv;
 2772         int retval;
 2773 
 2774         retval = 1;
 2775 
 2776         /*
 2777          * We don't traverse the peripheral driver list like we do the
 2778          * other lists, because it is a linker set, and therefore cannot be
 2779          * changed during runtime.  If the peripheral driver list is ever
 2780          * re-done to be something other than a linker set (i.e. it can
 2781          * change while the system is running), the list traversal should
 2782          * be modified to work like the other traversal functions.
 2783          */
 2784         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2785              *pdrv != NULL; pdrv++) {
 2786                 retval = tr_func(pdrv, arg);
 2787 
 2788                 if (retval == 0)
 2789                         return(retval);
 2790         }
 2791 
 2792         return(retval);
 2793 }
 2794 
 2795 static int
 2796 xptpdperiphtraverse(struct periph_driver **pdrv,
 2797                     struct cam_periph *start_periph,
 2798                     xpt_periphfunc_t *tr_func, void *arg)
 2799 {
 2800         struct cam_periph *periph, *next_periph;
 2801         int retval;
 2802 
 2803         retval = 1;
 2804 
 2805         for (periph = (start_periph ? start_periph :
 2806              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2807              periph = next_periph) {
 2808 
 2809                 next_periph = TAILQ_NEXT(periph, unit_links);
 2810 
 2811                 retval = tr_func(periph, arg);
 2812                 if (retval == 0)
 2813                         return(retval);
 2814         }
 2815         return(retval);
 2816 }
 2817 
 2818 static int
 2819 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2820 {
 2821         struct xpt_traverse_config *tr_config;
 2822 
 2823         tr_config = (struct xpt_traverse_config *)arg;
 2824 
 2825         if (tr_config->depth == XPT_DEPTH_BUS) {
 2826                 xpt_busfunc_t *tr_func;
 2827 
 2828                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2829 
 2830                 return(tr_func(bus, tr_config->tr_arg));
 2831         } else
 2832                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2833 }
 2834 
 2835 static int
 2836 xptdeftargetfunc(struct cam_et *target, void *arg)
 2837 {
 2838         struct xpt_traverse_config *tr_config;
 2839 
 2840         tr_config = (struct xpt_traverse_config *)arg;
 2841 
 2842         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2843                 xpt_targetfunc_t *tr_func;
 2844 
 2845                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2846 
 2847                 return(tr_func(target, tr_config->tr_arg));
 2848         } else
 2849                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2850 }
 2851 
 2852 static int
 2853 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2854 {
 2855         struct xpt_traverse_config *tr_config;
 2856 
 2857         tr_config = (struct xpt_traverse_config *)arg;
 2858 
 2859         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2860                 xpt_devicefunc_t *tr_func;
 2861 
 2862                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2863 
 2864                 return(tr_func(device, tr_config->tr_arg));
 2865         } else
 2866                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2867 }
 2868 
 2869 static int
 2870 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2871 {
 2872         struct xpt_traverse_config *tr_config;
 2873         xpt_periphfunc_t *tr_func;
 2874 
 2875         tr_config = (struct xpt_traverse_config *)arg;
 2876 
 2877         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2878 
 2879         /*
 2880          * Unlike the other default functions, we don't check for depth
 2881          * here.  The peripheral driver level is the last level in the EDT,
 2882          * so if we're here, we should execute the function in question.
 2883          */
 2884         return(tr_func(periph, tr_config->tr_arg));
 2885 }
 2886 
 2887 /*
 2888  * Execute the given function for every bus in the EDT.
 2889  */
 2890 static int
 2891 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2892 {
 2893         struct xpt_traverse_config tr_config;
 2894 
 2895         tr_config.depth = XPT_DEPTH_BUS;
 2896         tr_config.tr_func = tr_func;
 2897         tr_config.tr_arg = arg;
 2898 
 2899         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2900 }
 2901 
 2902 #ifdef notusedyet
 2903 /*
 2904  * Execute the given function for every target in the EDT.
 2905  */
 2906 static int
 2907 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
 2908 {
 2909         struct xpt_traverse_config tr_config;
 2910 
 2911         tr_config.depth = XPT_DEPTH_TARGET;
 2912         tr_config.tr_func = tr_func;
 2913         tr_config.tr_arg = arg;
 2914 
 2915         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2916 }
 2917 #endif /* notusedyet */
 2918 
 2919 /*
 2920  * Execute the given function for every device in the EDT.
 2921  */
 2922 static int
 2923 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2924 {
 2925         struct xpt_traverse_config tr_config;
 2926 
 2927         tr_config.depth = XPT_DEPTH_DEVICE;
 2928         tr_config.tr_func = tr_func;
 2929         tr_config.tr_arg = arg;
 2930 
 2931         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2932 }
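
/*
 * Usage sketch for the xpt_for_all_* helpers, assuming a hypothetical
 * callback that counts the configured devices in the EDT.
 */
#if 0
static int
xptcountdevfunc(struct cam_ed *device, void *arg)
{
        int *count;

        count = (int *)arg;
        if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
                (*count)++;
        return(1);
}

        /* ... then, in the interested caller ... */
        int ndevices = 0;

        xpt_for_all_devices(xptcountdevfunc, &ndevices);
#endif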
 2933 
 2934 #ifdef notusedyet
 2935 /*
 2936  * Execute the given function for every peripheral in the EDT.
 2937  */
 2938 static int
 2939 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
 2940 {
 2941         struct xpt_traverse_config tr_config;
 2942 
 2943         tr_config.depth = XPT_DEPTH_PERIPH;
 2944         tr_config.tr_func = tr_func;
 2945         tr_config.tr_arg = arg;
 2946 
 2947         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2948 }
 2949 #endif /* notusedyet */
 2950 
 2951 static int
 2952 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2953 {
 2954         struct cam_path path;
 2955         struct ccb_getdev cgd;
 2956         struct async_node *cur_entry;
 2957 
 2958         cur_entry = (struct async_node *)arg;
 2959 
 2960         /*
 2961          * Don't report unconfigured devices (Wildcard devs,
 2962          * devices only for target mode, device instances
 2963          * that have been invalidated but are waiting for
 2964          * their last reference count to be released).
 2965          */
 2966         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2967                 return (1);
 2968 
 2969         xpt_compile_path(&path,
 2970                          NULL,
 2971                          device->target->bus->path_id,
 2972                          device->target->target_id,
 2973                          device->lun_id);
 2974         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
 2975         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2976         xpt_action((union ccb *)&cgd);
 2977         cur_entry->callback(cur_entry->callback_arg,
 2978                             AC_FOUND_DEVICE,
 2979                             &path, &cgd);
 2980         xpt_release_path(&path);
 2981 
 2982         return(1);
 2983 }
 2984 
 2985 static int
 2986 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2987 {
 2988         struct cam_path path;
 2989         struct ccb_pathinq cpi;
 2990         struct async_node *cur_entry;
 2991 
 2992         cur_entry = (struct async_node *)arg;
 2993 
 2994         xpt_compile_path(&path, /*periph*/NULL,
 2995                          bus->sim->path_id,
 2996                          CAM_TARGET_WILDCARD,
 2997                          CAM_LUN_WILDCARD);
 2998         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 2999         cpi.ccb_h.func_code = XPT_PATH_INQ;
 3000         xpt_action((union ccb *)&cpi);
 3001         cur_entry->callback(cur_entry->callback_arg,
 3002                             AC_PATH_REGISTERED,
 3003                             &path, &cpi);
 3004         xpt_release_path(&path);
 3005 
 3006         return(1);
 3007 }
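
/*
 * Sketch of the registration these two replay functions pair with: a
 * peripheral driver fills out a ccb_setasync and sends it through
 * xpt_action() as an XPT_SASYNC_CB (handled below).  The callback
 * symbol is hypothetical.
 */
#if 0
        struct ccb_setasync csa;

        xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = AC_FOUND_DEVICE | AC_PATH_REGISTERED;
        csa.callback = fooasync;
        csa.callback_arg = NULL;
        xpt_action((union ccb *)&csa);
#endif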
 3008 
 3009 void
 3010 xpt_action(union ccb *start_ccb)
 3011 {
 3012         int iopl;
 3013 
 3014         GIANT_REQUIRED;
 3015 
 3016         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 3017 
 3018         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 3019 
 3020         iopl = splsoftcam();
 3021         switch (start_ccb->ccb_h.func_code) {
 3022         case XPT_SCSI_IO:
 3023         {
 3024 #ifdef CAM_NEW_TRAN_CODE
 3025                 struct cam_ed *device;
 3026 #endif /* CAM_NEW_TRAN_CODE */
 3027 #ifdef CAMDEBUG
 3028                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 3029                 struct cam_path *path;
 3030 
 3031                 path = start_ccb->ccb_h.path;
 3032 #endif
 3033 
 3034                 /*
 3035                  * For the sake of compatibility with SCSI-1
 3036                  * devices that may not understand the identify
 3037                  * message, we include lun information in the
 3038                  * second byte of all commands.  SCSI-1 specifies
 3039                  * that luns are a 3 bit value and reserves only 3
 3040                  * bits for lun information in the CDB.  Later
 3041                  * revisions of the SCSI spec allow for more than 8
 3042                  * luns, but have deprecated lun information in the
 3043                  * CDB.  So, if the lun won't fit, we must omit it.
 3044                  *
 3045                  * Also be aware that during initial probing for devices,
 3046                  * the inquiry information is unknown but initialized to 0.
 3047                  * This means that this code will be exercised while probing
 3048                  * devices with an ANSI revision greater than 2.
 3049                  */
 3050 #ifdef CAM_NEW_TRAN_CODE
 3051                 device = start_ccb->ccb_h.path->device;
 3052                 if (device->protocol_version <= SCSI_REV_2
 3053 #else /* CAM_NEW_TRAN_CODE */
 3054                 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
 3055 #endif /* CAM_NEW_TRAN_CODE */
 3056                  && start_ccb->ccb_h.target_lun < 8
 3057                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 3058 
 3059                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 3060                             start_ccb->ccb_h.target_lun << 5;
 3061                 }
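                /*
                 * Worked example of the OR above: target_lun 3 gives
                 * (3 << 5) == 0x60, setting the three SCSI-1 lun bits
                 * (7..5) of CDB byte 1 to 011 while leaving the low
                 * five bits untouched.
                 */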
 3062                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 3063                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
 3064                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
 3065                                        &path->device->inq_data),
 3066                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
 3067                                           cdb_str, sizeof(cdb_str))));
 3068         }
 3069         /* FALLTHROUGH */
 3070         case XPT_TARGET_IO:
 3071         case XPT_CONT_TARGET_IO:
 3072                 start_ccb->csio.sense_resid = 0;
 3073                 start_ccb->csio.resid = 0;
 3074                 /* FALLTHROUGH */
 3075         case XPT_RESET_DEV:
 3076         case XPT_ENG_EXEC:
 3077         {
 3078                 struct cam_path *path;
 3079                 struct cam_sim *sim;
 3080                 int s;
 3081                 int runq;
 3082 
 3083                 path = start_ccb->ccb_h.path;
 3084                 s = splsoftcam();
 3085 
 3086                 sim = path->bus->sim;
 3087                 if (SIM_DEAD(sim)) {
 3088                         /* The SIM has gone; just execute the CCB directly. */
 3089                         cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
 3090                         (*(sim->sim_action))(sim, start_ccb);
 3091                         splx(s);
 3092                         break;
 3093                 }
 3094 
 3095                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 3096                 if (path->device->qfrozen_cnt == 0)
 3097                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
 3098                 else
 3099                         runq = 0;
 3100                 splx(s);
 3101                 if (runq != 0)
 3102                         xpt_run_dev_sendq(path->bus);
 3103                 break;
 3104         }
 3105         case XPT_SET_TRAN_SETTINGS:
 3106         {
 3107                 xpt_set_transfer_settings(&start_ccb->cts,
 3108                                           start_ccb->ccb_h.path->device,
 3109                                           /*async_update*/FALSE);
 3110                 break;
 3111         }
 3112         case XPT_CALC_GEOMETRY:
 3113         {
 3114                 struct cam_sim *sim;
 3115 
 3116                 /* Filter out garbage */
 3117                 if (start_ccb->ccg.block_size == 0
 3118                  || start_ccb->ccg.volume_size == 0) {
 3119                         start_ccb->ccg.cylinders = 0;
 3120                         start_ccb->ccg.heads = 0;
 3121                         start_ccb->ccg.secs_per_track = 0;
 3122                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3123                         break;
 3124                 }
 3125 #ifdef PC98
 3126                 /*
 3127                  * In a PC-98 system, geometry translation depends on
 3128                  * the "real" device geometry obtained from mode page 4.
 3129                  * SCSI geometry translation is performed in the
 3130                  * initialization routine of the SCSI BIOS and the result is
 3131                  * stored in host memory.  If the translation is available
 3132                  * in host memory, use it.  If not, rely on the default
 3133                  * translation the device driver performs.
 3134                  */
 3135                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 3136                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3137                         break;
 3138                 }
 3139 #endif
 3140                 sim = start_ccb->ccb_h.path->bus->sim;
 3141                 (*(sim->sim_action))(sim, start_ccb);
 3142                 break;
 3143         }
 3144         case XPT_ABORT:
 3145         {
 3146                 union ccb* abort_ccb;
 3147                 int s;                          
 3148 
 3149                 abort_ccb = start_ccb->cab.abort_ccb;
 3150                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 3151 
 3152                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 3153                                 struct cam_ccbq *ccbq;
 3154 
 3155                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
 3156                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
 3157                                 abort_ccb->ccb_h.status =
 3158                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 3159                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 3160                                 s = splcam();
 3161                                 xpt_done(abort_ccb);
 3162                                 splx(s);
 3163                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3164                                 break;
 3165                         }
 3166                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 3167                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 3168                                 /*
 3169                                  * We've caught this ccb en route to
 3170                                  * the SIM.  Flag it for abort and the
 3171                                  * SIM will do so just before starting
 3172                                  * real work on the CCB.
 3173                                  */
 3174                                 abort_ccb->ccb_h.status =
 3175                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 3176                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 3177                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3178                                 break;
 3179                         }
 3180                 } 
 3181                 if (XPT_FC_IS_QUEUED(abort_ccb)
 3182                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 3183                         /*
 3184                          * It's already completed but waiting
 3185                          * for our SWI to get to it.
 3186                          */
 3187                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 3188                         break;
 3189                 }
 3190                 /*
 3191                  * If we weren't able to take care of the abort request
 3192                  * in the XPT, pass the request down to the SIM for processing.
 3193                  */
 3194         }
 3195         /* FALLTHROUGH */
 3196         case XPT_ACCEPT_TARGET_IO:
 3197         case XPT_EN_LUN:
 3198         case XPT_IMMED_NOTIFY:
 3199         case XPT_NOTIFY_ACK:
 3200         case XPT_GET_TRAN_SETTINGS:
 3201         case XPT_RESET_BUS:
 3202         {
 3203                 struct cam_sim *sim;
 3204 
 3205                 sim = start_ccb->ccb_h.path->bus->sim;
 3206                 (*(sim->sim_action))(sim, start_ccb);
 3207                 break;
 3208         }
 3209         case XPT_PATH_INQ:
 3210         {
 3211                 struct cam_sim *sim;
 3212 
 3213                 sim = start_ccb->ccb_h.path->bus->sim;
 3214                 (*(sim->sim_action))(sim, start_ccb);
 3215                 break;
 3216         }
 3217         case XPT_PATH_STATS:
 3218                 start_ccb->cpis.last_reset =
 3219                         start_ccb->ccb_h.path->bus->last_reset;
 3220                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3221                 break;
 3222         case XPT_GDEV_TYPE:
 3223         {
 3224                 struct cam_ed *dev;
 3225                 int s;
 3226 
 3227                 dev = start_ccb->ccb_h.path->device;
 3228                 s = splcam();
 3229                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3230                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3231                 } else {
 3232                         struct ccb_getdev *cgd;
 3233                         struct cam_eb *bus;
 3234                         struct cam_et *tar;
 3235 
 3236                         cgd = &start_ccb->cgd;
 3237                         bus = cgd->ccb_h.path->bus;
 3238                         tar = cgd->ccb_h.path->target;
 3239                         cgd->inq_data = dev->inq_data;
 3240                         cgd->ccb_h.status = CAM_REQ_CMP;
 3241                         cgd->serial_num_len = dev->serial_num_len;
 3242                         if ((dev->serial_num_len > 0)
 3243                          && (dev->serial_num != NULL))
 3244                                 bcopy(dev->serial_num, cgd->serial_num,
 3245                                       dev->serial_num_len);
 3246                 }
 3247                 splx(s);
 3248                 break; 
 3249         }
 3250         case XPT_GDEV_STATS:
 3251         {
 3252                 struct cam_ed *dev;
 3253                 int s;
 3254 
 3255                 dev = start_ccb->ccb_h.path->device;
 3256                 s = splcam();
 3257                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3258                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3259                 } else {
 3260                         struct ccb_getdevstats *cgds;
 3261                         struct cam_eb *bus;
 3262                         struct cam_et *tar;
 3263 
 3264                         cgds = &start_ccb->cgds;
 3265                         bus = cgds->ccb_h.path->bus;
 3266                         tar = cgds->ccb_h.path->target;
 3267                         cgds->dev_openings = dev->ccbq.dev_openings;
 3268                         cgds->dev_active = dev->ccbq.dev_active;
 3269                         cgds->devq_openings = dev->ccbq.devq_openings;
 3270                         cgds->devq_queued = dev->ccbq.queue.entries;
 3271                         cgds->held = dev->ccbq.held;
 3272                         cgds->last_reset = tar->last_reset;
 3273                         cgds->maxtags = dev->quirk->maxtags;
 3274                         cgds->mintags = dev->quirk->mintags;
 3275                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 3276                                 cgds->last_reset = bus->last_reset;
 3277                         cgds->ccb_h.status = CAM_REQ_CMP;
 3278                 }
 3279                 splx(s);
 3280                 break;
 3281         }
 3282         case XPT_GDEVLIST:
 3283         {
 3284                 struct cam_periph       *nperiph;
 3285                 struct periph_list      *periph_head;
 3286                 struct ccb_getdevlist   *cgdl;
 3287                 u_int                   i;
 3288                 int                     s;
 3289                 struct cam_ed           *device;
 3290                 int                     found;
 3291 
 3292 
 3293                 found = 0;
 3294 
 3295                 /*
 3296                  * Don't want anyone mucking with our data.
 3297                  */
 3298                 s = splcam();
 3299                 device = start_ccb->ccb_h.path->device;
 3300                 periph_head = &device->periphs;
 3301                 cgdl = &start_ccb->cgdl;
 3302 
 3303                 /*
 3304                  * Check and see if the list has changed since the user
 3305                  * last requested a list member.  If so, tell them that the
 3306                  * list has changed, and therefore they need to start over 
 3307                  * from the beginning.
 3308                  */
 3309                 if ((cgdl->index != 0) && 
 3310                     (cgdl->generation != device->generation)) {
 3311                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 3312                         splx(s);
 3313                         break;
 3314                 }
 3315 
 3316                 /*
 3317                  * Traverse the list of peripherals and attempt to find 
 3318                  * the requested peripheral.
 3319                  */
 3320                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 3321                      (nperiph != NULL) && (i <= cgdl->index);
 3322                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 3323                         if (i == cgdl->index) {
 3324                                 strncpy(cgdl->periph_name,
 3325                                         nperiph->periph_name,
 3326                                         DEV_IDLEN);
 3327                                 cgdl->unit_number = nperiph->unit_number;
 3328                                 found = 1;
 3329                         }
 3330                 }
 3331                 if (found == 0) {
 3332                         cgdl->status = CAM_GDEVLIST_ERROR;
 3333                         splx(s);
 3334                         break;
 3335                 }
 3336 
 3337                 if (nperiph == NULL)
 3338                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 3339                 else
 3340                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 3341 
 3342                 cgdl->index++;
 3343                 cgdl->generation = device->generation;
 3344 
 3345                 splx(s);
 3346                 cgdl->ccb_h.status = CAM_REQ_CMP;
 3347                 break;
 3348         }
 3349         case XPT_DEV_MATCH:
 3350         {
 3351                 int s;
 3352                 dev_pos_type position_type;
 3353                 struct ccb_dev_match *cdm;
 3354 
 3355                 cdm = &start_ccb->cdm;
 3356 
 3357                 /*
 3358                  * Prevent EDT changes while we traverse it.
 3359                  */
 3360                 s = splcam();
 3361                 /*
 3362                  * There are two ways of getting at information in the EDT.
 3363                  * The first way is via the primary EDT tree.  It starts
 3364                  * with a list of busses, then a list of targets on a bus,
 3365                  * then devices/luns on a target, and then peripherals on a
 3366                  * device/lun.  The "other" way is by the peripheral driver
 3367                  * lists.  The peripheral driver lists are organized by
 3368                  * peripheral driver (obviously), so it makes sense to use
 3369                  * them if the user is looking for something like "da1", or
 3370                  * for all "da" devices.  If the user is looking for
 3371                  * something on a particular bus, target, or lun, it's
 3372                  * generally better to go through the EDT tree.
 3373                  */
 3374 
 3375                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 3376                         position_type = cdm->pos.position_type;
 3377                 else {
 3378                         u_int i;
 3379 
 3380                         position_type = CAM_DEV_POS_NONE;
 3381 
 3382                         for (i = 0; i < cdm->num_patterns; i++) {
 3383                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 3384                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 3385                                         position_type = CAM_DEV_POS_EDT;
 3386                                         break;
 3387                                 }
 3388                         }
 3389 
 3390                         if (cdm->num_patterns == 0)
 3391                                 position_type = CAM_DEV_POS_EDT;
 3392                         else if (position_type == CAM_DEV_POS_NONE)
 3393                                 position_type = CAM_DEV_POS_PDRV;
 3394                 }
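                /*
                 * For example, a lone DEV_MATCH_PERIPH pattern for "da"
                 * is served from the peripheral driver lists, while any
                 * DEV_MATCH_BUS or DEV_MATCH_DEVICE pattern (or an
                 * empty pattern list) selects the EDT walk above.
                 */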
 3395 
 3396                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
 3397                 case CAM_DEV_POS_EDT:
 3398                         xptedtmatch(cdm);
 3399                         break;
 3400                 case CAM_DEV_POS_PDRV:
 3401                         xptperiphlistmatch(cdm);
 3402                         break;
 3403                 default:
 3404                         cdm->status = CAM_DEV_MATCH_ERROR;
 3405                         break;
 3406                 }
 3407 
 3408                 splx(s);
 3409 
 3410                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 3411                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 3412                 else
 3413                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3414 
 3415                 break;
 3416         }
 3417         case XPT_SASYNC_CB:
 3418         {
 3419                 struct ccb_setasync *csa;
 3420                 struct async_node *cur_entry;
 3421                 struct async_list *async_head;
 3422                 u_int32_t added;
 3423                 int s;
 3424 
 3425                 csa = &start_ccb->csa;
 3426                 added = csa->event_enable;
 3427                 async_head = &csa->ccb_h.path->device->asyncs;
 3428 
 3429                 /*
 3430                  * If there is already an entry for us, simply
 3431                  * update it.
 3432                  */
 3433                 s = splcam();
 3434                 cur_entry = SLIST_FIRST(async_head);
 3435                 while (cur_entry != NULL) {
 3436                         if ((cur_entry->callback_arg == csa->callback_arg)
 3437                          && (cur_entry->callback == csa->callback))
 3438                                 break;
 3439                         cur_entry = SLIST_NEXT(cur_entry, links);
 3440                 }
 3441 
 3442                 if (cur_entry != NULL) {
 3443                         /*
 3444                          * If the request has no flags set,
 3445                          * remove the entry.
 3446                          */
 3447                         added &= ~cur_entry->event_enable;
 3448                         if (csa->event_enable == 0) {
 3449                                 SLIST_REMOVE(async_head, cur_entry,
 3450                                              async_node, links);
 3451                                 csa->ccb_h.path->device->refcount--;
 3452                                 free(cur_entry, M_CAMXPT);
 3453                         } else {
 3454                                 cur_entry->event_enable = csa->event_enable;
 3455                         }
 3456                 } else {
 3457                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 3458                                            M_NOWAIT);
 3459                         if (cur_entry == NULL) {
 3460                                 splx(s);
 3461                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 3462                                 break;
 3463                         }
 3464                         cur_entry->event_enable = csa->event_enable;
 3465                         cur_entry->callback_arg = csa->callback_arg;
 3466                         cur_entry->callback = csa->callback;
 3467                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 3468                         csa->ccb_h.path->device->refcount++;
 3469                 }
 3470 
 3471                 if ((added & AC_FOUND_DEVICE) != 0) {
 3472                         /*
 3473                          * Get this peripheral up to date with all
 3474                          * the currently existing devices.
 3475                          */
 3476                         xpt_for_all_devices(xptsetasyncfunc, cur_entry);
 3477                 }
 3478                 if ((added & AC_PATH_REGISTERED) != 0) {
 3479                         /*
 3480                          * Get this peripheral up to date with all
 3481                          * the currently existing busses.
 3482                          */
 3483                         xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
 3484                 }
 3485                 splx(s);
 3486                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3487                 break;
 3488         }
 3489         case XPT_REL_SIMQ:
 3490         {
 3491                 struct ccb_relsim *crs;
 3492                 struct cam_ed *dev;
 3493                 int s;
 3494 
 3495                 crs = &start_ccb->crs;
 3496                 dev = crs->ccb_h.path->device;
 3497                 if (dev == NULL) {
 3498 
 3499                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 3500                         break;
 3501                 }
 3502 
 3503                 s = splcam();
 3504 
 3505                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 3506 
 3507                         if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
 3508                                 /* Don't ever go below one opening */
 3509                                 if (crs->openings > 0) {
 3510                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
 3511                                                             crs->openings);
 3512 
 3513                                         if (bootverbose) {
 3514                                                 xpt_print_path(crs->ccb_h.path);
 3515                                                 printf("tagged openings "
 3516                                                        "now %d\n",
 3517                                                        crs->openings);
 3518                                         }
 3519                                 }
 3520                         }
 3521                 }
 3522 
 3523                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 3524 
 3525                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 3526 
 3527                                 /*
 3528                                  * Just extend the old timeout and decrement
 3529                                  * the freeze count so that a single timeout
 3530                                  * is sufficient for releasing the queue.
 3531                                  */
 3532                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3533                                 untimeout(xpt_release_devq_timeout,
 3534                                           dev, dev->c_handle);
 3535                         } else {
 3536 
 3537                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3538                         }
 3539 
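                        /*
                         * release_timeout is in milliseconds, so the
                         * (ms * hz) / 1000 below converts it to ticks
                         * for timeout(9); e.g. 500ms at hz == 1000 is
                         * 500 ticks.
                         */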
 3540                         dev->c_handle =
 3541                                 timeout(xpt_release_devq_timeout,
 3542                                         dev,
 3543                                         (crs->release_timeout * hz) / 1000);
 3544 
 3545                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 3546 
 3547                 }
 3548 
 3549                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 3550 
 3551                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 3552                                 /*
 3553                                  * Decrement the freeze count so that a single
 3554                                  * completion is still sufficient to unfreeze
 3555                                  * the queue.
 3556                                  */
 3557                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3558                         } else {
 3559                                 
 3560                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 3561                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3562                         }
 3563                 }
 3564 
 3565                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 3566 
 3567                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 3568                          || (dev->ccbq.dev_active == 0)) {
 3569 
 3570                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3571                         } else {
 3572                                 
 3573                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 3574                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3575                         }
 3576                 }
 3577                 splx(s);
 3578                 
 3579                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 3580 
 3581                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
 3582                                          /*run_queue*/TRUE);
 3583                 }
 3584                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
 3585                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3586                 break;
 3587         }
 3588         case XPT_SCAN_BUS:
 3589                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
 3590                 break;
 3591         case XPT_SCAN_LUN:
 3592                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
 3593                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
 3594                              start_ccb);
 3595                 break;
 3596         case XPT_DEBUG: {
 3597 #ifdef CAMDEBUG
 3598                 int s;
 3599                 
 3600                 s = splcam();
 3601 #ifdef CAM_DEBUG_DELAY
 3602                 cam_debug_delay = CAM_DEBUG_DELAY;
 3603 #endif
 3604                 cam_dflags = start_ccb->cdbg.flags;
 3605                 if (cam_dpath != NULL) {
 3606                         xpt_free_path(cam_dpath);
 3607                         cam_dpath = NULL;
 3608                 }
 3609 
 3610                 if (cam_dflags != CAM_DEBUG_NONE) {
 3611                         if (xpt_create_path(&cam_dpath, xpt_periph,
 3612                                             start_ccb->ccb_h.path_id,
 3613                                             start_ccb->ccb_h.target_id,
 3614                                             start_ccb->ccb_h.target_lun) !=
 3615                                             CAM_REQ_CMP) {
 3616                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3617                                 cam_dflags = CAM_DEBUG_NONE;
 3618                         } else {
 3619                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3620                                 xpt_print_path(cam_dpath);
 3621                                 printf("debugging flags now %x\n", cam_dflags);
 3622                         }
 3623                 } else {
 3624                         cam_dpath = NULL;
 3625                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3626                 }
 3627                 splx(s);
 3628 #else /* !CAMDEBUG */
 3629                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 3630 #endif /* CAMDEBUG */
 3631                 break;
 3632         }
 3633         case XPT_NOOP:
 3634                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 3635                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
 3636                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3637                 break;
 3638         default:
 3639         case XPT_SDEV_TYPE:
 3640         case XPT_TERM_IO:
 3641         case XPT_ENG_INQ:
 3642                 /* XXX Implement */
 3643                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 3644                 break;
 3645         }
 3646         splx(iopl);
 3647 }
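
/*
 * Caller's-eye sketch of xpt_action(), assuming an existing path:
 * issue an XPT_PATH_INQ (as xptsetasyncbusfunc() does above) and read
 * back a couple of the fields the SIM fills in.  Illustrative only.
 */
#if 0
        struct ccb_pathinq cpi;

        xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
        cpi.ccb_h.func_code = XPT_PATH_INQ;
        xpt_action((union ccb *)&cpi);
        if ((cpi.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
                printf("%s: %u target(s) max\n", cpi.hba_vid,
                       cpi.max_target);
#endif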
 3648 
 3649 void
 3650 xpt_polled_action(union ccb *start_ccb)
 3651 {
 3652         int       s;
 3653         u_int32_t timeout;
 3654         struct    cam_sim *sim; 
 3655         struct    cam_devq *devq;
 3656         struct    cam_ed *dev;
 3657 
 3658         GIANT_REQUIRED;
 3659 
 3660         timeout = start_ccb->ccb_h.timeout;
 3661         sim = start_ccb->ccb_h.path->bus->sim;
 3662         devq = sim->devq;
 3663         dev = start_ccb->ccb_h.path->device;
 3664 
 3665         s = splcam();
 3666 
 3667         /*
 3668          * Steal an opening so that no other queued requests
 3669          * can get it before us while we simulate interrupts.
 3670          */
 3671         dev->ccbq.devq_openings--;
 3672         dev->ccbq.dev_openings--;       
 3673         
 3674         while(((devq != NULL && devq->send_openings <= 0) ||
 3675            dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
 3676                 DELAY(1000);
 3677                 (*(sim->sim_poll))(sim);
 3678                 camisr(&cam_bioq);
 3679         }
 3680         
 3681         dev->ccbq.devq_openings++;
 3682         dev->ccbq.dev_openings++;
 3683         
 3684         if (timeout != 0) {
 3685                 xpt_action(start_ccb);
 3686                 while(--timeout > 0) {
 3687                         (*(sim->sim_poll))(sim);
 3688                         camisr(&cam_bioq);
 3689                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 3690                             != CAM_REQ_INPROG)
 3691                                 break;
 3692                         DELAY(1000);
 3693                 }
 3694                 if (timeout == 0) {
 3695                         /*
 3696                          * XXX Is it worth adding a sim_timeout entry
 3697                          * point so we can attempt recovery?  If
 3698                          * this is only used for dumps, I don't think
 3699                          * it is.
 3700                          */
 3701                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3702                 }
 3703         } else {
 3704                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3705         }
 3706         splx(s);
 3707 }
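
/*
 * Sketch of the polled path's intended use (e.g. crash dumps, before
 * interrupts are usable).  ccb_h.timeout is consumed above in roughly
 * 1ms DELAY() steps, so it acts as a millisecond budget.  CCB setup is
 * abbreviated and illustrative.
 */
#if 0
        union ccb *ccb;

        ccb = xpt_alloc_ccb();
        xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/1);
        ccb->ccb_h.func_code = XPT_SCSI_IO;
        ccb->ccb_h.timeout = 5000;      /* poll for up to ~5 seconds */
        /* ... fill in ccb->csio (cdb, data buffer, flags) ... */
        xpt_polled_action(ccb);
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
                printf("polled command failed\n");
        xpt_free_ccb(ccb);
#endif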
 3708         
 3709 /*
 3710  * Schedule a peripheral driver to receive a ccb when its
 3711  * target device has space for more transactions.
 3712  */
 3713 void
 3714 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 3715 {
 3716         struct cam_ed *device;
 3717         union ccb *work_ccb;
 3718         int s;
 3719         int runq;
 3720 
 3721         GIANT_REQUIRED;
 3722 
 3723         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3724         device = perph->path->device;
 3725         s = splsoftcam();
 3726         if (periph_is_queued(perph)) {
 3727                 /* Simply reorder based on new priority */
 3728                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3729                           ("   change priority to %d\n", new_priority));
 3730                 if (new_priority < perph->pinfo.priority) {
 3731                         camq_change_priority(&device->drvq,
 3732                                              perph->pinfo.index,
 3733                                              new_priority);
 3734                 }
 3735                 runq = 0;
 3736         } else if (SIM_DEAD(perph->path->bus->sim)) {
 3737                 /* The SIM is gone so just call periph_start directly. */
 3738                 work_ccb = xpt_get_ccb(perph->path->device);
 3739                 splx(s);
 3740                 if (work_ccb == NULL)
 3741                         return; /* XXX */
 3742                 xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
 3743                 perph->pinfo.priority = new_priority;
 3744                 perph->periph_start(perph, work_ccb);
 3745                 return;
 3746         } else {
 3747                 /* New entry on the queue */
 3748                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3749                           ("   added periph to queue\n"));
 3750                 perph->pinfo.priority = new_priority;
 3751                 perph->pinfo.generation = ++device->drvq.generation;
 3752                 camq_insert(&device->drvq, &perph->pinfo);
 3753                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3754         }
 3755         splx(s);
 3756         if (runq != 0) {
 3757                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3758                           ("   calling xpt_run_devq\n"));
 3759                 xpt_run_dev_allocq(perph->path->bus);
 3760         }
 3761 }
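
/*
 * Typical caller sketch: a peripheral driver's strategy routine queues
 * the bio, then asks to be scheduled; its periph_start routine is later
 * handed the CCB that xpt_run_dev_allocq() obtains for it.  The driver,
 * softc, and queue names are hypothetical.
 */
#if 0
struct foo_softc {
        struct bio_queue_head bio_queue;
};

static void
foostrategy(struct cam_periph *periph, struct bio *bp)
{
        struct foo_softc *softc;

        softc = (struct foo_softc *)periph->softc;
        bioq_disksort(&softc->bio_queue, bp);
        xpt_schedule(periph, /*priority*/1);
}
#endif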
 3762 
 3763 
 3764 /*
 3765  * Schedule a device to run on a given queue.
 3766  * If the device was inserted as a new entry on the queue,
 3767  * return 1 meaning the device queue should be run. If we
 3768  * were already queued, implying someone else has already
 3769  * started the queue, return 0 so the caller doesn't attempt
 3770  * to run the queue.  Must be run at splsoftcam or higher
 3771  * (splcam encompasses splsoftcam).
 3772  */
 3773 static int
 3774 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3775                  u_int32_t new_priority)
 3776 {
 3777         int retval;
 3778         u_int32_t old_priority;
 3779 
 3780         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3781 
 3782         old_priority = pinfo->priority;
 3783 
 3784         /*
 3785          * Are we already queued?
 3786          */
 3787         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3788                 /* Simply reorder based on new priority */
 3789                 if (new_priority < old_priority) {
 3790                         camq_change_priority(queue, pinfo->index,
 3791                                              new_priority);
 3792                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3793                                         ("changed priority to %d\n",
 3794                                          new_priority));
 3795                 }
 3796                 retval = 0;
 3797         } else {
 3798                 /* New entry on the queue */
 3799                 if (new_priority < old_priority)
 3800                         pinfo->priority = new_priority;
 3801 
 3802                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3803                                 ("Inserting onto queue\n"));
 3804                 pinfo->generation = ++queue->generation;
 3805                 camq_insert(queue, pinfo);
 3806                 retval = 1;
 3807         }
 3808         return (retval);
 3809 }
 3810 
 3811 static void
 3812 xpt_run_dev_allocq(struct cam_eb *bus)
 3813 {
 3814         struct  cam_devq *devq;
 3815         int     s;
 3816 
 3817         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3818         devq = bus->sim->devq;
 3819 
 3820         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3821                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3822                          "openings == %d, active == %d\n",
 3823                          devq->alloc_queue.qfrozen_cnt,
 3824                          devq->alloc_queue.entries,
 3825                          devq->alloc_openings,
 3826                          devq->alloc_active));
 3827 
 3828         s = splsoftcam();
 3829         devq->alloc_queue.qfrozen_cnt++;
 3830         while ((devq->alloc_queue.entries > 0)
 3831             && (devq->alloc_openings > 0)
 3832             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
 3833                 struct  cam_ed_qinfo *qinfo;
 3834                 struct  cam_ed *device;
 3835                 union   ccb *work_ccb;
 3836                 struct  cam_periph *drv;
 3837                 struct  camq *drvq;
 3838                 
 3839                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3840                                                            CAMQ_HEAD);
 3841                 device = qinfo->device;
 3842 
 3843                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3844                                 ("running device %p\n", device));
 3845 
 3846                 drvq = &device->drvq;
 3847 
 3848 #ifdef CAMDEBUG
 3849                 if (drvq->entries <= 0) {
 3850                         panic("xpt_run_dev_allocq: "
 3851                               "Device on queue without any work to do");
 3852                 }
 3853 #endif
 3854                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3855                         devq->alloc_openings--;
 3856                         devq->alloc_active++;
 3857                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3858                         splx(s);
 3859                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3860                                       drv->pinfo.priority);
 3861                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3862                                         ("calling periph start\n"));
 3863                         drv->periph_start(drv, work_ccb);
 3864                 } else {
 3865                         /*
 3866                          * Malloc failure in alloc_ccb
 3867                          */
 3868                         /*
 3869                          * XXX add us to a list to be run from free_ccb
 3870                          * if we don't have any ccbs active on this
 3871                          * device queue; otherwise we may never get run
 3872                          * again.
 3873                          */
 3874                         break;
 3875                 }
 3876         
 3877                 /* Raise IPL for possible insertion and test at top of loop */
 3878                 s = splsoftcam();
 3879 
 3880                 if (drvq->entries > 0) {
 3881                         /* We have more work.  Attempt to reschedule */
 3882                         xpt_schedule_dev_allocq(bus, device);
 3883                 }
 3884         }
 3885         devq->alloc_queue.qfrozen_cnt--;
 3886         splx(s);
 3887 }
 3888 
 3889 static void
 3890 xpt_run_dev_sendq(struct cam_eb *bus)
 3891 {
 3892         struct  cam_devq *devq;
 3893         int     s;
 3894 
 3895         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3896         
 3897         devq = bus->sim->devq;
 3898 
 3899         s = splcam();
 3900         devq->send_queue.qfrozen_cnt++;
 3901         splx(s);
 3902         s = splsoftcam();
 3903         while ((devq->send_queue.entries > 0)
 3904             && (devq->send_openings > 0)) {
 3905                 struct  cam_ed_qinfo *qinfo;
 3906                 struct  cam_ed *device;
 3907                 union ccb *work_ccb;
 3908                 struct  cam_sim *sim;
 3909                 int     ospl;
 3910 
 3911                 ospl = splcam();
 3912                 if (devq->send_queue.qfrozen_cnt > 1) {
 3913                         splx(ospl);
 3914                         break;
 3915                 }
 3916 
 3917                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3918                                                            CAMQ_HEAD);
 3919                 device = qinfo->device;
 3920 
 3921                 /*
 3922                  * If the device has been "frozen", don't attempt
 3923                  * to run it.
 3924                  */
 3925                 if (device->qfrozen_cnt > 0) {
 3926                         splx(ospl);
 3927                         continue;
 3928                 }
 3929 
 3930                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3931                                 ("running device %p\n", device));
 3932 
 3933                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3934                 if (work_ccb == NULL) {
 3935                         printf("device on run queue with no ccbs???\n");
 3936                         splx(ospl);
 3937                         continue;
 3938                 }
 3939 
 3940                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3941 
 3942                         if (num_highpower <= 0) {
 3943                                 /*
 3944                                  * We got a high power command, but we
 3945                                  * don't have any available slots.  Freeze
 3946                                  * the device queue until we have a slot
 3947                                  * available.
 3948                                  */
 3949                                 device->qfrozen_cnt++;
 3950                                 STAILQ_INSERT_TAIL(&highpowerq, 
 3951                                                    &work_ccb->ccb_h, 
 3952                                                    xpt_links.stqe);
 3953 
 3954                                 splx(ospl);
 3955                                 continue;
 3956                         } else {
 3957                                 /*
 3958                                  * Consume a high power slot while
 3959                                  * this ccb runs.
 3960                                  */
 3961                                 num_highpower--;
 3962                         }
 3963                 }
 3964                 devq->active_dev = device;
 3965                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3966 
 3967                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3968                 splx(ospl);
 3969 
 3970                 devq->send_openings--;
 3971                 devq->send_active++;            
 3972                 
 3973                 if (device->ccbq.queue.entries > 0)
 3974                         xpt_schedule_dev_sendq(bus, device);
 3975 
 3976                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
 3977                         /*
 3978                          * The client wants to freeze the queue
 3979                          * after this CCB is sent.
 3980                          */
 3981                         ospl = splcam();
 3982                         device->qfrozen_cnt++;
 3983                         splx(ospl);
 3984                 }
 3985                 
 3986                 splx(s);
 3987 
 3988                 /* In Target mode, the peripheral driver knows best... */
 3989                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3990                         if ((device->inq_flags & SID_CmdQue) != 0
 3991                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3992                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3993                         else
 3994                                 /*
 3995                                  * Clear this in case of a retried CCB that
 3996                                  * failed due to a rejected tag.
 3997                                  */
 3998                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3999                 }
 4000 
 4001                 /*
 4002                  * Device queues can be shared among multiple sim instances
 4003                  * that reside on different busses.  Use the SIM in the queue
 4004                  * CCB's path, rather than the one in the bus that was passed
 4005                  * into this function.
 4006                  */
 4007                 sim = work_ccb->ccb_h.path->bus->sim;
 4008                 (*(sim->sim_action))(sim, work_ccb);
 4009 
 4010                 ospl = splcam();
 4011                 devq->active_dev = NULL;
 4012                 splx(ospl);
 4013                 /* Raise IPL for possible insertion and test at top of loop */
 4014                 s = splsoftcam();
 4015         }
 4016         splx(s);
 4017         s = splcam();
 4018         devq->send_queue.qfrozen_cnt--;
 4019         splx(s);
 4020 }
 4021 
 4022 /*
 4023  * This function merges stuff from the slave ccb into the master ccb, while
 4024  * keeping important fields in the master ccb constant.
 4025  */
 4026 void
 4027 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 4028 {
 4029         GIANT_REQUIRED;
 4030 
 4031         /*
 4032          * Pull fields that are valid for peripheral drivers to set
 4033          * into the master CCB along with the CCB "payload".
 4034          */
 4035         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 4036         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 4037         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 4038         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 4039         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 4040               sizeof(union ccb) - sizeof(struct ccb_hdr));
 4041 }
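
/*
 * Example (illustrative sketch): xpt_merge_ccb() lets a driver graft a
 * separately constructed request onto a CCB the transport already accounts
 * for, as the pass(4)-style passthrough path does with a caller-supplied
 * CCB "inccb".  The names here are hypothetical:
 *
 *	union ccb *ccb;
 *
 *	ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
 *	xpt_merge_ccb(ccb, inccb);
 *	cam_periph_runccb(ccb, ...);
 */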
 4042 
 4043 void
 4044 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 4045 {
 4046         GIANT_REQUIRED;
 4047 
 4048         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 4049         ccb_h->pinfo.priority = priority;
 4050         ccb_h->path = path;
 4051         ccb_h->path_id = path->bus->path_id;
 4052         if (path->target)
 4053                 ccb_h->target_id = path->target->target_id;
 4054         else
 4055                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 4056         if (path->device) {
 4057                 ccb_h->target_lun = path->device->lun_id;
 4058                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 4059         } else {
 4060                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 4061         }
 4062         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 4063         ccb_h->flags = 0;
 4064 }
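
/*
 * Example (sketch): a CCB header is normally initialized with
 * xpt_setup_ccb() before the CCB is handed to xpt_action(), as
 * xpt_bus_register() below does for its XPT_PATH_INQ inquiry:
 *
 *	struct ccb_pathinq cpi;
 *
 *	xpt_setup_ccb(&cpi.ccb_h, path, 1);
 *	cpi.ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action((union ccb *)&cpi);
 */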
 4065 
 4066 /* Path manipulation functions */
 4067 cam_status
 4068 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 4069                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 4070 {
 4071         struct     cam_path *path;
 4072         cam_status status;
 4073 
 4074         GIANT_REQUIRED;
 4075 
 4076         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 4077 
 4078         if (path == NULL) {
 4079                 status = CAM_RESRC_UNAVAIL;
 4080                 return(status);
 4081         }
 4082         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 4083         if (status != CAM_REQ_CMP) {
 4084                 free(path, M_CAMXPT);
 4085                 path = NULL;
 4086         }
 4087         *new_path_ptr = path;
 4088         return (status);
 4089 }
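
/*
 * Example (sketch): callers must check the returned status before using
 * the path, and release it with xpt_free_path() when done so that the
 * bus, target, and device references taken in xpt_compile_path() are
 * dropped:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id)
 *	    != CAM_REQ_CMP)
 *		return;
 *	xpt_async(AC_LOST_DEVICE, path, NULL);
 *	xpt_free_path(path);
 */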
 4090 
 4091 static cam_status
 4092 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 4093                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 4094 {
 4095         struct       cam_eb *bus;
 4096         struct       cam_et *target;
 4097         struct       cam_ed *device;
 4098         cam_status   status;
 4099         int          s;
 4100 
 4101         status = CAM_REQ_CMP;   /* Completed without error */
 4102         target = NULL;          /* Wildcarded */
 4103         device = NULL;          /* Wildcarded */
 4104 
 4105         /*
 4106          * We will potentially modify the EDT, so block interrupts
 4107          * that may attempt to create cam paths.
 4108          */
 4109         s = splcam();
 4110         bus = xpt_find_bus(path_id);
 4111         if (bus == NULL) {
 4112                 status = CAM_PATH_INVALID;
 4113         } else {
 4114                 target = xpt_find_target(bus, target_id);
 4115                 if (target == NULL) {
 4116                         /* Create one */
 4117                         struct cam_et *new_target;
 4118 
 4119                         new_target = xpt_alloc_target(bus, target_id);
 4120                         if (new_target == NULL) {
 4121                                 status = CAM_RESRC_UNAVAIL;
 4122                         } else {
 4123                                 target = new_target;
 4124                         }
 4125                 }
 4126                 if (target != NULL) {
 4127                         device = xpt_find_device(target, lun_id);
 4128                         if (device == NULL) {
 4129                                 /* Create one */
 4130                                 struct cam_ed *new_device;
 4131 
 4132                                 new_device = xpt_alloc_device(bus,
 4133                                                               target,
 4134                                                               lun_id);
 4135                                 if (new_device == NULL) {
 4136                                         status = CAM_RESRC_UNAVAIL;
 4137                                 } else {
 4138                                         device = new_device;
 4139                                 }
 4140                         }
 4141                 }
 4142         }
 4143         splx(s);
 4144 
 4145         /*
 4146          * Only touch the user's data if we are successful.
 4147          */
 4148         if (status == CAM_REQ_CMP) {
 4149                 new_path->periph = perph;
 4150                 new_path->bus = bus;
 4151                 new_path->target = target;
 4152                 new_path->device = device;
 4153                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 4154         } else {
 4155                 if (device != NULL)
 4156                         xpt_release_device(bus, target, device);
 4157                 if (target != NULL)
 4158                         xpt_release_target(bus, target);
 4159                 if (bus != NULL)
 4160                         xpt_release_bus(bus);
 4161         }
 4162         return (status);
 4163 }
 4164 
 4165 static void
 4166 xpt_release_path(struct cam_path *path)
 4167 {
 4168         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 4169         if (path->device != NULL) {
 4170                 xpt_release_device(path->bus, path->target, path->device);
 4171                 path->device = NULL;
 4172         }
 4173         if (path->target != NULL) {
 4174                 xpt_release_target(path->bus, path->target);
 4175                 path->target = NULL;
 4176         }
 4177         if (path->bus != NULL) {
 4178                 xpt_release_bus(path->bus);
 4179                 path->bus = NULL;
 4180         }
 4181 }
 4182 
 4183 void
 4184 xpt_free_path(struct cam_path *path)
 4185 {
 4186         GIANT_REQUIRED;
 4187 
 4188         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 4189         xpt_release_path(path);
 4190         free(path, M_CAMXPT);
 4191 }
 4192 
 4193 
 4194 /*
 4195  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 4196  * in path1, 2 for match with wildcards in path2.
 4197  */
 4198 int
 4199 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 4200 {
 4201         int retval = 0;
 4202 
 4203         GIANT_REQUIRED;
 4204 
 4205         if (path1->bus != path2->bus) {
 4206                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 4207                         retval = 1;
 4208                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 4209                         retval = 2;
 4210                 else
 4211                         return (-1);
 4212         }
 4213         if (path1->target != path2->target) {
 4214                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 4215                         if (retval == 0)
 4216                                 retval = 1;
 4217                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 4218                         retval = 2;
 4219                 else
 4220                         return (-1);
 4221         }
 4222         if (path1->device != path2->device) {
 4223                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 4224                         if (retval == 0)
 4225                                 retval = 1;
 4226                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 4227                         retval = 2;
 4228                 else
 4229                         return (-1);
 4230         }
 4231         return (retval);
 4232 }
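
/*
 * Worked example: with path1 fully specified (bus 0, target 1, lun 0) and
 * path2 built with CAM_TARGET_WILDCARD and CAM_LUN_WILDCARD on the same
 * bus, xpt_path_comp(path1, path2) returns 2 (matched via wildcards in
 * path2); swapping the arguments returns 1, identical paths return 0, and
 * paths on different non-wildcard busses return -1.  xpt_async() relies on
 * this wildcard matching to fan events out to interested clients.
 */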
 4233 
 4234 void
 4235 xpt_print_path(struct cam_path *path)
 4236 {
 4237         GIANT_REQUIRED;
 4238 
 4239         if (path == NULL)
 4240                 printf("(nopath): ");
 4241         else {
 4242                 if (path->periph != NULL)
 4243                         printf("(%s%d:", path->periph->periph_name,
 4244                                path->periph->unit_number);
 4245                 else
 4246                         printf("(noperiph:");
 4247 
 4248                 if (path->bus != NULL)
 4249                         printf("%s%d:%d:", path->bus->sim->sim_name,
 4250                                path->bus->sim->unit_number,
 4251                                path->bus->sim->bus_id);
 4252                 else
 4253                         printf("nobus:");
 4254 
 4255                 if (path->target != NULL)
 4256                         printf("%d:", path->target->target_id);
 4257                 else
 4258                         printf("X:");
 4259 
 4260                 if (path->device != NULL)
 4261                         printf("%d): ", path->device->lun_id);
 4262                 else
 4263                         printf("X): ");
 4264         }
 4265 }
 4266 
 4267 int
 4268 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 4269 {
 4270         struct sbuf sb;
 4271 
 4272         GIANT_REQUIRED;
 4273 
 4274         sbuf_new(&sb, str, str_len, 0);
 4275 
 4276         if (path == NULL)
 4277                 sbuf_printf(&sb, "(nopath): ");
 4278         else {
 4279                 if (path->periph != NULL)
 4280                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 4281                                     path->periph->unit_number);
 4282                 else
 4283                         sbuf_printf(&sb, "(noperiph:");
 4284 
 4285                 if (path->bus != NULL)
 4286                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 4287                                     path->bus->sim->unit_number,
 4288                                     path->bus->sim->bus_id);
 4289                 else
 4290                         sbuf_printf(&sb, "nobus:");
 4291 
 4292                 if (path->target != NULL)
 4293                         sbuf_printf(&sb, "%d:", path->target->target_id);
 4294                 else
 4295                         sbuf_printf(&sb, "X:");
 4296 
 4297                 if (path->device != NULL)
 4298                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
 4299                 else
 4300                         sbuf_printf(&sb, "X): ");
 4301         }
 4302         sbuf_finish(&sb);
 4303 
 4304         return(sbuf_len(&sb));
 4305 }
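
/*
 * Example (sketch): xpt_path_string() formats the same prefix that
 * xpt_print_path() prints, but into a caller-supplied buffer:
 *
 *	char buf[64];
 *
 *	xpt_path_string(path, buf, sizeof(buf));
 *	printf("%sdevice timeout\n", buf);
 */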
 4306 
 4307 path_id_t
 4308 xpt_path_path_id(struct cam_path *path)
 4309 {
 4310         GIANT_REQUIRED;
 4311 
 4312         return(path->bus->path_id);
 4313 }
 4314 
 4315 target_id_t
 4316 xpt_path_target_id(struct cam_path *path)
 4317 {
 4318         GIANT_REQUIRED;
 4319 
 4320         if (path->target != NULL)
 4321                 return (path->target->target_id);
 4322         else
 4323                 return (CAM_TARGET_WILDCARD);
 4324 }
 4325 
 4326 lun_id_t
 4327 xpt_path_lun_id(struct cam_path *path)
 4328 {
 4329         GIANT_REQUIRED;
 4330 
 4331         if (path->device != NULL)
 4332                 return (path->device->lun_id);
 4333         else
 4334                 return (CAM_LUN_WILDCARD);
 4335 }
 4336 
 4337 struct cam_sim *
 4338 xpt_path_sim(struct cam_path *path)
 4339 {
 4340         GIANT_REQUIRED;
 4341 
 4342         return (path->bus->sim);
 4343 }
 4344 
 4345 struct cam_periph*
 4346 xpt_path_periph(struct cam_path *path)
 4347 {
 4348         GIANT_REQUIRED;
 4349 
 4350         return (path->periph);
 4351 }
 4352 
 4353 /*
 4354  * Release a CAM control block for the caller.  Remit the cost of the structure
 4355  * to the device referenced by the path.  If this device had no 'credits'
 4356  * and peripheral drivers have registered async callbacks for this
 4357  * notification, call them now.
 4358  */
 4359 void
 4360 xpt_release_ccb(union ccb *free_ccb)
 4361 {
 4362         int      s;
 4363         struct   cam_path *path;
 4364         struct   cam_ed *device;
 4365         struct   cam_eb *bus;
 4366 
 4367         GIANT_REQUIRED;
 4368 
 4369         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 4370         path = free_ccb->ccb_h.path;
 4371         device = path->device;
 4372         bus = path->bus;
 4373         s = splsoftcam();
 4374         cam_ccbq_release_opening(&device->ccbq);
 4375         if (xpt_ccb_count > xpt_max_ccbs) {
 4376                 xpt_free_ccb(free_ccb);
 4377                 xpt_ccb_count--;
 4378         } else {
 4379                 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
 4380         }
 4381         if (bus->sim->devq == NULL) {
 4382                 splx(s);
 4383                 return;
 4384         }
 4385         bus->sim->devq->alloc_openings++;
 4386         bus->sim->devq->alloc_active--;
 4387         /* XXX Turn this into an inline function - xpt_run_device?? */
 4388         if ((device_is_alloc_queued(device) == 0)
 4389          && (device->drvq.entries > 0)) {
 4390                 xpt_schedule_dev_allocq(bus, device);
 4391         }
 4392         splx(s);
 4393         if (dev_allocq_is_runnable(bus->sim->devq))
 4394                 xpt_run_dev_allocq(bus);
 4395 }
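
/*
 * Example (sketch): a peripheral driver's completion routine conventionally
 * ends with xpt_release_ccb(), which returns the CCB's opening to the
 * device and reschedules the allocation queue; "mydone" is a hypothetical
 * callback:
 *
 *	static void
 *	mydone(struct cam_periph *periph, union ccb *done_ccb)
 *	{
 *		(inspect done_ccb->ccb_h.status, recover if needed)
 *		xpt_release_ccb(done_ccb);
 *	}
 */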
 4396 
 4397 /* Functions accessed by SIM drivers */
 4398 
 4399 /*
 4400  * A sim structure, listing the SIM entry points and instance
 4401  * identification info, is passed to xpt_bus_register to hook the SIM
 4402  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 4403  * for this new bus and places it in the array of busses and assigns
 4404  * it a path_id.  The path_id may be influenced by "hard wiring"
 4405  * information specified by the user.  Once interrupt services are
 4406  * available, the bus will be probed.
 4407  */
 4408 int32_t
 4409 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
 4410 {
 4411         struct cam_eb *new_bus;
 4412         struct cam_eb *old_bus;
 4413         struct ccb_pathinq cpi;
 4414         int s;
 4415 
 4416         GIANT_REQUIRED;
 4417 
 4418         sim->bus_id = bus;
 4419         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 4420                                           M_CAMXPT, M_NOWAIT);
 4421         if (new_bus == NULL) {
 4422                 /* Couldn't satisfy request */
 4423                 return (CAM_RESRC_UNAVAIL);
 4424         }
 4425 
 4426         if (strcmp(sim->sim_name, "xpt") != 0) {
 4427 
 4428                 sim->path_id =
 4429                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 4430         }
 4431 
 4432         TAILQ_INIT(&new_bus->et_entries);
 4433         new_bus->path_id = sim->path_id;
 4434         new_bus->sim = sim;
 4435         timevalclear(&new_bus->last_reset);
 4436         new_bus->flags = 0;
 4437         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 4438         new_bus->generation = 0;
 4439         s = splcam();
 4440         old_bus = TAILQ_FIRST(&xpt_busses);
 4441         while (old_bus != NULL
 4442             && old_bus->path_id < new_bus->path_id)
 4443                 old_bus = TAILQ_NEXT(old_bus, links);
 4444         if (old_bus != NULL)
 4445                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 4446         else
 4447                 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
 4448         bus_generation++;
 4449         splx(s);
 4450 
 4451         /* Notify interested parties */
 4452         if (sim->path_id != CAM_XPT_PATH_ID) {
 4453                 struct cam_path path;
 4454 
 4455                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
 4456                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4457                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 4458                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 4459                 xpt_action((union ccb *)&cpi);
 4460                 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
 4461                 xpt_release_path(&path);
 4462         }
 4463         return (CAM_SUCCESS);
 4464 }
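
/*
 * Example (rough sketch, hypothetical names): a SIM driver typically
 * allocates its device queue and sim structure and then registers,
 * along these lines:
 *
 *	struct cam_devq *devq;
 *	struct cam_sim *sim;
 *
 *	devq = cam_simq_alloc(MAX_TRANSACTIONS);
 *	sim = cam_sim_alloc(mysim_action, mysim_poll, "mysim", softc,
 *			    unit, 1, MAX_TAGGED, devq);
 *	if (sim == NULL || xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *		(clean up and fail)
 */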
 4465 
 4466 int32_t
 4467 xpt_bus_deregister(path_id_t pathid)
 4468 {
 4469         struct cam_path bus_path;
 4470         struct cam_ed *device;
 4471         struct cam_ed_qinfo *qinfo;
 4472         struct cam_devq *devq;
 4473         struct cam_periph *periph;
 4474         struct cam_sim *ccbsim;
 4475         union ccb *work_ccb;
 4476         cam_status status;
 4477 
 4478         GIANT_REQUIRED;
 4479 
 4480         status = xpt_compile_path(&bus_path, NULL, pathid,
 4481                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4482         if (status != CAM_REQ_CMP)
 4483                 return (status);
 4484 
 4485         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 4486         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 4487 
 4488         /* The SIM may be gone, so use a dummy SIM for any stray operations. */
 4489         devq = bus_path.bus->sim->devq;
 4490         bus_path.bus->sim = &cam_dead_sim;
 4491 
 4492         /* Execute any pending operations now. */
 4493         while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 4494             CAMQ_HEAD)) != NULL ||
 4495             (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 4496             CAMQ_HEAD)) != NULL) {
 4497                 do {
 4498                         device = qinfo->device;
 4499                         work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 4500                         if (work_ccb != NULL) {
 4501                                 devq->active_dev = device;
 4502                                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 4503                                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 4504                                 ccbsim = work_ccb->ccb_h.path->bus->sim;
 4505                                 (*(ccbsim->sim_action))(ccbsim, work_ccb);
 4506                         }
 4507 
 4508                         periph = (struct cam_periph *)camq_remove(&device->drvq,
 4509                             CAMQ_HEAD);
 4510                         if (periph != NULL)
 4511                                 xpt_schedule(periph, periph->pinfo.priority);
 4512                 } while (work_ccb != NULL || periph != NULL);
 4513         }
 4514 
 4515         /* Make sure all completed CCBs are processed. */
 4516         while (!TAILQ_EMPTY(&cam_bioq)) {
 4517                 camisr(&cam_bioq);
 4518 
 4519                 /* Repeat the async's for the benefit of any new devices. */
 4520                 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 4521                 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 4522         }
 4523 
 4524         /* Release the reference count held while registered. */
 4525         xpt_release_bus(bus_path.bus);
 4526         xpt_release_path(&bus_path);
 4527 
 4528         /* Recheck for more completed CCBs. */
 4529         while (!TAILQ_EMPTY(&cam_bioq))
 4530                 camisr(&cam_bioq);
 4531 
 4532         return (CAM_REQ_CMP);
 4533 }
 4534 
 4535 static path_id_t
 4536 xptnextfreepathid(void)
 4537 {
 4538         struct cam_eb *bus;
 4539         path_id_t pathid;
 4540         const char *strval;
 4541 
 4542         pathid = 0;
 4543         bus = TAILQ_FIRST(&xpt_busses);
 4544 retry:
 4545         /* Find an unoccupied pathid */
 4546         while (bus != NULL
 4547             && bus->path_id <= pathid) {
 4548                 if (bus->path_id == pathid)
 4549                         pathid++;
 4550                 bus = TAILQ_NEXT(bus, links);
 4551         }
 4552 
 4553         /*
 4554          * Ensure that this pathid is not reserved for
 4555          * a bus that may be registered in the future.
 4556          */
 4557         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 4558                 ++pathid;
 4559                 /* Start the search over */
 4560                 goto retry;
 4561         }
 4562         return (pathid);
 4563 }
 4564 
 4565 static path_id_t
 4566 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 4567 {
 4568         path_id_t pathid;
 4569         int i, dunit, val;
 4570         char buf[32];
 4571         const char *dname;
 4572 
 4573         pathid = CAM_XPT_PATH_ID;
 4574         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 4575         i = 0;
 4576         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 4577                 if (strcmp(dname, "scbus")) {
 4578                         /* Avoid a bit of foot shooting. */
 4579                         continue;
 4580                 }
 4581                 if (dunit < 0)          /* unwired?! */
 4582                         continue;
 4583                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 4584                         if (sim_bus == val) {
 4585                                 pathid = dunit;
 4586                                 break;
 4587                         }
 4588                 } else if (sim_bus == 0) {
 4589                         /* Unspecified matches bus 0 */
 4590                         pathid = dunit;
 4591                         break;
 4592                 } else {
 4593                         printf("Ambiguous scbus configuration for %s%d "
 4594                                "bus %d, cannot wire down.  The kernel "
 4595                                "config entry for scbus%d should "
 4596                                "specify a controller bus.\n"
 4597                                "Scbus will be assigned dynamically.\n",
 4598                                sim_name, sim_unit, sim_bus, dunit);
 4599                         break;
 4600                 }
 4601         }
 4602 
 4603         if (pathid == CAM_XPT_PATH_ID)
 4604                 pathid = xptnextfreepathid();
 4605         return (pathid);
 4606 }
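
/*
 * Example: the wiring examined above comes from kernel hints.  Entries
 * such as the following (illustrative values) in /boot/device.hints wire
 * scbus0 to bus 0 of controller ahc0, which is what resource_find_match()
 * and resource_int_value() report here:
 *
 *	hint.scbus.0.at="ahc0"
 *	hint.scbus.0.bus="0"
 */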
 4607 
 4608 void
 4609 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 4610 {
 4611         struct cam_eb *bus;
 4612         struct cam_et *target, *next_target;
 4613         struct cam_ed *device, *next_device;
 4614         int s;
 4615 
 4616         GIANT_REQUIRED;
 4617 
 4618         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
 4619 
 4620         /*
 4621          * Most async events come from a CAM interrupt context.  In
 4622          * a few cases, the error recovery code at the peripheral layer,
 4623          * which may run from our SWI or a process context, may signal
 4624          * deferred events with a call to xpt_async. Ensure async
 4625          * notifications are serialized by blocking cam interrupts.
 4626          */
 4627         s = splcam();
 4628 
 4629         bus = path->bus;
 4630 
 4631         if (async_code == AC_BUS_RESET) { 
 4632                 int s;
 4633 
 4634                 s = splclock();
 4635                 /* Update our notion of when the last reset occurred */
 4636                 microtime(&bus->last_reset);
 4637                 splx(s);
 4638         }
 4639 
 4640         for (target = TAILQ_FIRST(&bus->et_entries);
 4641              target != NULL;
 4642              target = next_target) {
 4643 
 4644                 next_target = TAILQ_NEXT(target, links);
 4645 
 4646                 if (path->target != target
 4647                  && path->target->target_id != CAM_TARGET_WILDCARD
 4648                  && target->target_id != CAM_TARGET_WILDCARD)
 4649                         continue;
 4650 
 4651                 if (async_code == AC_SENT_BDR) {
 4652                         int s;
 4653 
 4654                         /* Update our notion of when the last reset occurred */
 4655                         s = splclock();
 4656                         microtime(&path->target->last_reset);
 4657                         splx(s);
 4658                 }
 4659 
 4660                 for (device = TAILQ_FIRST(&target->ed_entries);
 4661                      device != NULL;
 4662                      device = next_device) {
 4663 
 4664                         next_device = TAILQ_NEXT(device, links);
 4665 
 4666                         if (path->device != device 
 4667                          && path->device->lun_id != CAM_LUN_WILDCARD
 4668                          && device->lun_id != CAM_LUN_WILDCARD)
 4669                                 continue;
 4670 
 4671                         xpt_dev_async(async_code, bus, target,
 4672                                       device, async_arg);
 4673 
 4674                         xpt_async_bcast(&device->asyncs, async_code,
 4675                                         path, async_arg);
 4676                 }
 4677         }
 4678         
 4679         /*
 4680          * If this wasn't a fully wildcarded async, tell all
 4681          * clients that want all async events.
 4682          */
 4683         if (bus != xpt_periph->path->bus)
 4684                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 4685                                 path, async_arg);
 4686         splx(s);
 4687 }
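
/*
 * Example (sketch): peripheral drivers subscribe to these notifications
 * with an XPT_SASYNC_CB CCB; "myasync" and "softc" are hypothetical:
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = myasync;
 *	csa.callback_arg = softc;
 *	xpt_action((union ccb *)&csa);
 */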
 4688 
 4689 static void
 4690 xpt_async_bcast(struct async_list *async_head,
 4691                 u_int32_t async_code,
 4692                 struct cam_path *path, void *async_arg)
 4693 {
 4694         struct async_node *cur_entry;
 4695 
 4696         cur_entry = SLIST_FIRST(async_head);
 4697         while (cur_entry != NULL) {
 4698                 struct async_node *next_entry;
 4699                 /*
 4700                  * Grab the next list entry before we call the current
 4701                  * entry's callback.  This is because the callback function
 4702                  * can delete its async callback entry.
 4703                  */
 4704                 next_entry = SLIST_NEXT(cur_entry, links);
 4705                 if ((cur_entry->event_enable & async_code) != 0)
 4706                         cur_entry->callback(cur_entry->callback_arg,
 4707                                             async_code, path,
 4708                                             async_arg);
 4709                 cur_entry = next_entry;
 4710         }
 4711 }
 4712 
 4713 /*
 4714  * Handle any per-device event notifications that require action by the XPT.
 4715  */
 4716 static void
 4717 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
 4718               struct cam_ed *device, void *async_arg)
 4719 {
 4720         cam_status status;
 4721         struct cam_path newpath;
 4722 
 4723         /*
 4724          * We only need to handle events for real devices.
 4725          */
 4726         if (target->target_id == CAM_TARGET_WILDCARD
 4727          || device->lun_id == CAM_LUN_WILDCARD)
 4728                 return;
 4729 
 4730         /*
 4731          * We need our own path with wildcards expanded to
 4732          * handle certain types of events.
 4733          */
 4734         if ((async_code == AC_SENT_BDR)
 4735          || (async_code == AC_BUS_RESET)
 4736          || (async_code == AC_INQ_CHANGED))
 4737                 status = xpt_compile_path(&newpath, NULL,
 4738                                           bus->path_id,
 4739                                           target->target_id,
 4740                                           device->lun_id);
 4741         else
 4742                 status = CAM_REQ_CMP_ERR;
 4743 
 4744         if (status == CAM_REQ_CMP) {
 4745 
 4746                 /*
 4747                  * Allow transfer negotiation to occur in a
 4748                  * tag-free environment.
 4749                  */
 4750                 if (async_code == AC_SENT_BDR
 4751                  || async_code == AC_BUS_RESET)
 4752                         xpt_toggle_tags(&newpath);
 4753 
 4754                 if (async_code == AC_INQ_CHANGED) {
 4755                         /*
 4756                          * We've sent a start unit command, or
 4757                          * something similar to a device that
 4758                          * may have caused its inquiry data to
 4759                          * change. So we re-scan the device to
 4760                          * refresh the inquiry data for it.
 4761                          */
 4762                         xpt_scan_lun(newpath.periph, &newpath,
 4763                                      CAM_EXPECT_INQ_CHANGE, NULL);
 4764                 }
 4765                 xpt_release_path(&newpath);
 4766         } else if (async_code == AC_LOST_DEVICE) {
 4767                 device->flags |= CAM_DEV_UNCONFIGURED;
 4768         } else if (async_code == AC_TRANSFER_NEG) {
 4769                 struct ccb_trans_settings *settings;
 4770 
 4771                 settings = (struct ccb_trans_settings *)async_arg;
 4772                 xpt_set_transfer_settings(settings, device,
 4773                                           /*async_update*/TRUE);
 4774         }
 4775 }
 4776 
 4777 u_int32_t
 4778 xpt_freeze_devq(struct cam_path *path, u_int count)
 4779 {
 4780         int s;
 4781         struct ccb_hdr *ccbh;
 4782 
 4783         GIANT_REQUIRED;
 4784 
 4785         s = splcam();
 4786         path->device->qfrozen_cnt += count;
 4787 
 4788         /*
 4789          * Mark the last CCB in the queue as needing
 4790          * to be requeued if the driver hasn't
 4791          * changed its state yet.  This fixes a race
 4792          * where a ccb is just about to be queued to
 4793          * a controller driver when its interrupt routine
 4794          * freezes the queue.  To completely close the
 4795          * hole, controller drivers must check to see
 4796          * if a ccb's status is still CAM_REQ_INPROG
 4797          * under spl protection just before they queue
 4798          * the CCB.  See ahc_action/ahc_freeze_devq for
 4799          * an example.
 4800          */
 4801         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
 4802         if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4803                 ccbh->status = CAM_REQUEUE_REQ;
 4804         splx(s);
 4805         return (path->device->qfrozen_cnt);
 4806 }
 4807 
 4808 u_int32_t
 4809 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4810 {
 4811         GIANT_REQUIRED;
 4812 
 4813         sim->devq->send_queue.qfrozen_cnt += count;
 4814         if (sim->devq->active_dev != NULL) {
 4815                 struct ccb_hdr *ccbh;
 4816                 
 4817                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
 4818                                   ccb_hdr_tailq);
 4819                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4820                         ccbh->status = CAM_REQUEUE_REQ;
 4821         }
 4822         return (sim->devq->send_queue.qfrozen_cnt);
 4823 }
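
/*
 * Example (sketch of the usual SIM-side pattern): a controller that runs
 * out of internal resources in its action routine freezes the simq and
 * asks for the CCB to be requeued, thawing later from its interrupt path:
 *
 *	xpt_freeze_simq(sim, 1);
 *	ccb->ccb_h.status = CAM_REQUEUE_REQ;
 *	xpt_done(ccb);
 *	...
 *	xpt_release_simq(sim, TRUE);
 */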
 4824 
 4825 static void
 4826 xpt_release_devq_timeout(void *arg)
 4827 {
 4828         struct cam_ed *device;
 4829 
 4830         device = (struct cam_ed *)arg;
 4831 
 4832         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
 4833 }
 4834 
 4835 void
 4836 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4837 {
 4838         GIANT_REQUIRED;
 4839 
 4840         xpt_release_devq_device(path->device, count, run_queue);
 4841 }
 4842 
 4843 static void
 4844 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
 4845 {
 4846         int     rundevq;
 4847         int     s0, s1;
 4848 
 4849         rundevq = 0;
 4850         s0 = splsoftcam();
 4851         s1 = splcam();
 4852         if (dev->qfrozen_cnt > 0) {
 4853 
 4854                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
 4855                 dev->qfrozen_cnt -= count;
 4856                 if (dev->qfrozen_cnt == 0) {
 4857 
 4858                         /*
 4859                          * No longer need to wait for a successful
 4860                          * command completion.
 4861                          */
 4862                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4863 
 4864                         /*
 4865                          * Remove any timeouts that might be scheduled
 4866                          * to release this queue.
 4867                          */
 4868                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4869                                 untimeout(xpt_release_devq_timeout, dev,
 4870                                           dev->c_handle);
 4871                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4872                         }
 4873 
 4874                         /*
 4875                          * Now that we are unfrozen schedule the
 4876                          * device so any pending transactions are
 4877                          * run.
 4878                          */
 4879                         if ((dev->ccbq.queue.entries > 0)
 4880                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4881                          && (run_queue != 0)) {
 4882                                 rundevq = 1;
 4883                         }
 4884                 }
 4885         }
 4886         splx(s1);
 4887         if (rundevq != 0)
 4888                 xpt_run_dev_sendq(dev->target->bus);
 4889         splx(s0);
 4890 }
 4891 
 4892 void
 4893 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4894 {
 4895         int     s;
 4896         struct  camq *sendq;
 4897 
 4898         GIANT_REQUIRED;
 4899 
 4900         sendq = &(sim->devq->send_queue);
 4901         s = splcam();
 4902         if (sendq->qfrozen_cnt > 0) {
 4903 
 4904                 sendq->qfrozen_cnt--;
 4905                 if (sendq->qfrozen_cnt == 0) {
 4906                         struct cam_eb *bus;
 4907 
 4908                         /*
 4909                          * If there is a timeout scheduled to release this
 4910                          * sim queue, remove it.  The queue frozen count is
 4911                          * already at 0.
 4912                          */
 4913                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4914                                 untimeout(xpt_release_simq_timeout, sim,
 4915                                           sim->c_handle);
 4916                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4917                         }
 4918                         bus = xpt_find_bus(sim->path_id);
 4919                         splx(s);
 4920 
 4921                         if (run_queue) {
 4922                                 /*
 4923                                  * Now that we are unfrozen run the send queue.
 4924                                  */
 4925                                 xpt_run_dev_sendq(bus);
 4926                         }
 4927                         xpt_release_bus(bus);
 4928                 } else
 4929                         splx(s);
 4930         } else
 4931                 splx(s);
 4932 }
 4933 
 4934 static void
 4935 xpt_release_simq_timeout(void *arg)
 4936 {
 4937         struct cam_sim *sim;
 4938 
 4939         sim = (struct cam_sim *)arg;
 4940         xpt_release_simq(sim, /* run_queue */ TRUE);
 4941 }
 4942 
 4943 void
 4944 xpt_done(union ccb *done_ccb)
 4945 {
 4946         int s;
 4947 
 4948         s = splcam();
 4949 
 4950         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4951         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4952                 /*
 4953                  * Queue any of the "non-immediate" type of ccbs
 4954                  * for handling by our SWI handler.
 4955                  */
 4956                 switch (done_ccb->ccb_h.path->periph->type) {
 4957                 case CAM_PERIPH_BIO:
 4958                         mtx_lock(&cam_bioq_lock);
 4959                         TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
 4960                                           sim_links.tqe);
 4961                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4962                         mtx_unlock(&cam_bioq_lock);
 4963                         swi_sched(cambio_ih, 0);
 4964                         break;
 4965                 default:
 4966                         panic("unknown periph type %d",
 4967                             done_ccb->ccb_h.path->periph->type);
 4968                 }
 4969         }
 4970         splx(s);
 4971 }
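
/*
 * Example (minimal sketch): a SIM completes a CCB from its interrupt
 * handler by setting a final status and calling xpt_done():
 *
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 */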
 4972 
 4973 union ccb *
 4974 xpt_alloc_ccb(void)
 4975 {
 4976         union ccb *new_ccb;
 4977 
 4978         GIANT_REQUIRED;
 4979 
 4980         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_WAITOK);
 4981         return (new_ccb);
 4982 }
 4983 
 4984 union ccb *
 4985 xpt_alloc_ccb_nowait(void)
 4986 {
 4987         union ccb *new_ccb;
 4988 
 4989         GIANT_REQUIRED;
 4990 
 4991         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_NOWAIT);
 4992         return (new_ccb);
 4993 }
 4994 
 4995 void
 4996 xpt_free_ccb(union ccb *free_ccb)
 4997 {
 4998         free(free_ccb, M_CAMXPT);
 4999 }
 5000 
 5001 
 5002 
 5003 /* Private XPT functions */
 5004 
 5005 /*
 5006  * Get a CAM control block for the caller. Charge the structure to the device
 5007  * referenced by the path.  If this device has no 'credits' then the
 5008  * device already has the maximum number of outstanding operations under way
 5009  * and we return NULL. If we don't have sufficient resources to allocate more
 5010  * ccbs, we also return NULL.
 5011  */
 5012 static union ccb *
 5013 xpt_get_ccb(struct cam_ed *device)
 5014 {
 5015         union ccb *new_ccb;
 5016         int s;
 5017 
 5018         s = splsoftcam();
 5019         if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
 5020                 new_ccb = xpt_alloc_ccb_nowait();
 5021                 if (new_ccb == NULL) {
 5022                         splx(s);
 5023                         return (NULL);
 5024                 }
 5025                 callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 5026                 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
 5027                                   xpt_links.sle);
 5028                 xpt_ccb_count++;
 5029         }
 5030         cam_ccbq_take_opening(&device->ccbq);
 5031         SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
 5032         splx(s);
 5033         return (new_ccb);
 5034 }
 5035 
 5036 static void
 5037 xpt_release_bus(struct cam_eb *bus)
 5038 {
 5039         int s;
 5040 
 5041         s = splcam();
 5042         if ((--bus->refcount == 0)
 5043          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
 5044                 TAILQ_REMOVE(&xpt_busses, bus, links);
 5045                 bus_generation++;
 5046                 splx(s);
 5047                 free(bus, M_CAMXPT);
 5048         } else
 5049                 splx(s);
 5050 }
 5051 
 5052 static struct cam_et *
 5053 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 5054 {
 5055         struct cam_et *target;
 5056 
 5057         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
 5058         if (target != NULL) {
 5059                 struct cam_et *cur_target;
 5060 
 5061                 TAILQ_INIT(&target->ed_entries);
 5062                 target->bus = bus;
 5063                 target->target_id = target_id;
 5064                 target->refcount = 1;
 5065                 target->generation = 0;
 5066                 timevalclear(&target->last_reset);
 5067                 /*
 5068                  * Hold a reference to our parent bus so it
 5069                  * will not go away before we do.
 5070                  */
 5071                 bus->refcount++;
 5072 
 5073                 /* Insertion sort into our bus's target list */
 5074                 cur_target = TAILQ_FIRST(&bus->et_entries);
 5075                 while (cur_target != NULL && cur_target->target_id < target_id)
 5076                         cur_target = TAILQ_NEXT(cur_target, links);
 5077 
 5078                 if (cur_target != NULL) {
 5079                         TAILQ_INSERT_BEFORE(cur_target, target, links);
 5080                 } else {
 5081                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 5082                 }
 5083                 bus->generation++;
 5084         }
 5085         return (target);
 5086 }
 5087 
 5088 static void
 5089 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
 5090 {
 5091         int s;
 5092 
 5093         s = splcam();
 5094         if ((--target->refcount == 0)
 5095          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
 5096                 TAILQ_REMOVE(&bus->et_entries, target, links);
 5097                 bus->generation++;
 5098                 splx(s);
 5099                 free(target, M_CAMXPT);
 5100                 xpt_release_bus(bus);
 5101         } else
 5102                 splx(s);
 5103 }
 5104 
 5105 static struct cam_ed *
 5106 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 5107 {
 5108 #ifdef CAM_NEW_TRAN_CODE
 5109         struct     cam_path path;
 5110 #endif /* CAM_NEW_TRAN_CODE */
 5111         struct     cam_ed *device;
 5112         struct     cam_devq *devq;
 5113         cam_status status;
 5114 
 5115         if (SIM_DEAD(bus->sim))
 5116                 return (NULL);
 5117 
 5118         /* Make space for us in the device queue on our bus */
 5119         devq = bus->sim->devq;
 5120         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 5121 
 5122         if (status != CAM_REQ_CMP) {
 5123                 device = NULL;
 5124         } else {
 5125                 device = (struct cam_ed *)malloc(sizeof(*device),
 5126                                                  M_CAMXPT, M_NOWAIT);
 5127         }
 5128 
 5129         if (device != NULL) {
 5130                 struct cam_ed *cur_device;
 5131 
 5132                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 5133                 device->alloc_ccb_entry.device = device;
 5134                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
 5135                 device->send_ccb_entry.device = device;
 5136                 device->target = target;
 5137                 device->lun_id = lun_id;
 5138                 /* Initialize our queues */
 5139                 if (camq_init(&device->drvq, 0) != 0) {
 5140                         free(device, M_CAMXPT);
 5141                         return (NULL);
 5142                 }
 5143                 if (cam_ccbq_init(&device->ccbq,
 5144                                   bus->sim->max_dev_openings) != 0) {
 5145                         camq_fini(&device->drvq);
 5146                         free(device, M_CAMXPT);
 5147                         return (NULL);
 5148                 }
 5149                 SLIST_INIT(&device->asyncs);
 5150                 SLIST_INIT(&device->periphs);
 5151                 device->generation = 0;
 5152                 device->owner = NULL;
 5153                 /*
 5154                  * Take the default quirk entry until we have inquiry
 5155                  * data and can determine a better quirk to use.
 5156                  */
 5157                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
 5158                 bzero(&device->inq_data, sizeof(device->inq_data));
 5159                 device->inq_flags = 0;
 5160                 device->queue_flags = 0;
 5161                 device->serial_num = NULL;
 5162                 device->serial_num_len = 0;
 5163                 device->qfrozen_cnt = 0;
 5164                 device->flags = CAM_DEV_UNCONFIGURED;
 5165                 device->tag_delay_count = 0;
 5166                 device->tag_saved_openings = 0;
 5167                 device->refcount = 1;
 5168                 callout_handle_init(&device->c_handle);
 5169 
 5170                 /*
 5171                  * Hold a reference to our parent target so it
 5172                  * will not go away before we do.
 5173                  */
 5174                 target->refcount++;
 5175 
 5176                 /*
 5177                  * XXX should be limited by number of CCBs this bus can
 5178                  * do.
 5179                  */
 5180                 xpt_max_ccbs += device->ccbq.devq_openings;
 5181                 /* Insertion sort into our target's device list */
 5182                 cur_device = TAILQ_FIRST(&target->ed_entries);
 5183                 while (cur_device != NULL && cur_device->lun_id < lun_id)
 5184                         cur_device = TAILQ_NEXT(cur_device, links);
 5185                 if (cur_device != NULL) {
 5186                         TAILQ_INSERT_BEFORE(cur_device, device, links);
 5187                 } else {
 5188                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 5189                 }
 5190                 target->generation++;
 5191 #ifdef CAM_NEW_TRAN_CODE
 5192                 if (lun_id != CAM_LUN_WILDCARD) {
 5193                         xpt_compile_path(&path,
 5194                                          NULL,
 5195                                          bus->path_id,
 5196                                          target->target_id,
 5197                                          lun_id);
 5198                         xpt_devise_transport(&path);
 5199                         xpt_release_path(&path);
 5200                 }
 5201 #endif /* CAM_NEW_TRAN_CODE */
 5202         }
 5203         return (device);
 5204 }
 5205 
 5206 static void
 5207 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
 5208                    struct cam_ed *device)
 5209 {
 5210         int s;
 5211 
 5212         s = splcam();
 5213         if ((--device->refcount == 0)
 5214          && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
 5215                 struct cam_devq *devq;
 5216 
 5217                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 5218                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 5219                         panic("Removing device while still queued for ccbs");
 5220 
 5221                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 5222                         untimeout(xpt_release_devq_timeout, device,
 5223                                   device->c_handle);
 5224 
 5225                 TAILQ_REMOVE(&target->ed_entries, device, links);
 5226                 target->generation++;
 5227                 xpt_max_ccbs -= device->ccbq.devq_openings;
 5228                 if (!SIM_DEAD(bus->sim)) {
 5229                         /* Release our slot in the devq */
 5230                         devq = bus->sim->devq;
 5231                         cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 5232                 }
 5233                 splx(s);
 5234                 camq_fini(&device->drvq);
 5235                 camq_fini(&device->ccbq.queue);
 5236                 free(device, M_CAMXPT);
 5237                 xpt_release_target(bus, target);
 5238         } else
 5239                 splx(s);
 5240 }
 5241 
 5242 static u_int32_t
 5243 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 5244 {
 5245         int     s;
 5246         int     diff;
 5247         int     result;
 5248         struct  cam_ed *dev;
 5249 
 5250         dev = path->device;
 5251         s = splsoftcam();
 5252 
 5253         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 5254         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 5255         if (result == CAM_REQ_CMP && (diff < 0)) {
 5256                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 5257         }
 5258         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 5259          || (dev->inq_flags & SID_CmdQue) != 0)
 5260                 dev->tag_saved_openings = newopenings;
 5261         /* Adjust the global limit */
 5262         xpt_max_ccbs += diff;
 5263         splx(s);
 5264         return (result);
 5265 }
 5266 
 5267 static struct cam_eb *
 5268 xpt_find_bus(path_id_t path_id)
 5269 {
 5270         struct cam_eb *bus;
 5271 
 5272         for (bus = TAILQ_FIRST(&xpt_busses);
 5273              bus != NULL;
 5274              bus = TAILQ_NEXT(bus, links)) {
 5275                 if (bus->path_id == path_id) {
 5276                         bus->refcount++;
 5277                         break;
 5278                 }
 5279         }
 5280         return (bus);
 5281 }
 5282 
 5283 static struct cam_et *
 5284 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 5285 {
 5286         struct cam_et *target;
 5287 
 5288         for (target = TAILQ_FIRST(&bus->et_entries);
 5289              target != NULL;
 5290              target = TAILQ_NEXT(target, links)) {
 5291                 if (target->target_id == target_id) {
 5292                         target->refcount++;
 5293                         break;
 5294                 }
 5295         }
 5296         return (target);
 5297 }
 5298 
 5299 static struct cam_ed *
 5300 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 5301 {
 5302         struct cam_ed *device;
 5303 
 5304         for (device = TAILQ_FIRST(&target->ed_entries);
 5305              device != NULL;
 5306              device = TAILQ_NEXT(device, links)) {
 5307                 if (device->lun_id == lun_id) {
 5308                         device->refcount++;
 5309                         break;
 5310                 }
 5311         }
 5312         return (device);
 5313 }
 5314 
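/*
 * Editorial sketch (illustrative, not part of the original source): each
 * xpt_find_*() helper above takes a reference on the object it returns,
 * so transient lookups pair with the matching release:
 *
 *	struct cam_eb *bus;
 *	struct cam_et *target;
 *
 *	if ((bus = xpt_find_bus(path_id)) != NULL) {
 *		if ((target = xpt_find_target(bus, target_id)) != NULL) {
 *			(use target here)
 *			xpt_release_target(bus, target);
 *		}
 *		xpt_release_bus(bus);
 *	}
 */
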
 5315 typedef struct {
 5316         union   ccb *request_ccb;
 5317         struct  ccb_pathinq *cpi;
 5318         int     counter;
 5319 } xpt_scan_bus_info;
 5320 
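/*
 * Editorial note: "counter" is overloaded by the two scan strategies in
 * xpt_scan_bus() below.  For parallel scans it counts the per-target LUN
 * scans still outstanding; for sequential (PIM_SEQSCAN) scans it holds
 * the target id currently being scanned.
 */
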
 5321 static void
 5322 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
 5323 {
 5324         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5325                   ("xpt_scan_bus\n"));
 5326         switch (request_ccb->ccb_h.func_code) {
 5327         case XPT_SCAN_BUS:
 5328         {
 5329                 xpt_scan_bus_info *scan_info;
 5330                 union   ccb *work_ccb;
 5331                 struct  cam_path *path;
 5332                 u_int i;
 5333                 u_int max_target;
 5334                 u_int initiator_id;
 5335 
 5336                 /* Find out the characteristics of the bus */
 5337                 work_ccb = xpt_alloc_ccb();
 5338                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
 5339                     request_ccb->ccb_h.pinfo.priority);
 5340                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 5341                 xpt_action(work_ccb);
 5342                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
 5343                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
 5344                         xpt_free_ccb(work_ccb);
 5345                         xpt_done(request_ccb);
 5346                         return;
 5347                 }
 5348 
 5349                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5350                         /*
 5351                          * Can't scan the bus on an adapter that
 5352                          * cannot perform the initiator role.
 5353                          */
 5354                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5355                         xpt_free_ccb(work_ccb);
 5356                         xpt_done(request_ccb);
 5357                         return;
 5358                 }
 5359 
 5360                 /* Save some state for use while we probe for devices */
 5361                 scan_info = (xpt_scan_bus_info *)
 5362                     malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
 5363                 scan_info->request_ccb = request_ccb;
 5364                 scan_info->cpi = &work_ccb->cpi;
 5365 
 5366                 /* Cache on our stack so we can work asynchronously */
 5367                 max_target = scan_info->cpi->max_target;
 5368                 initiator_id = scan_info->cpi->initiator_id;
 5369 
 5370 
 5371                 /*
 5372                  * We can scan all targets in parallel, or do it sequentially.
 5373                  */
 5374                 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
 5375                         max_target = 0;
 5376                         scan_info->counter = 0;
 5377                 } else {
 5378                         scan_info->counter = scan_info->cpi->max_target + 1;
 5379                         if (scan_info->cpi->initiator_id < scan_info->counter) {
 5380                                 scan_info->counter--;
 5381                         }
 5382                 }
 5383                 
 5384                 for (i = 0; i <= max_target; i++) {
 5385                         cam_status status;
 5386                         if (i == initiator_id)
 5387                                 continue;
 5388 
 5389                         status = xpt_create_path(&path, xpt_periph,
 5390                                                  request_ccb->ccb_h.path_id,
 5391                                                  i, 0);
 5392                         if (status != CAM_REQ_CMP) {
 5393                                 printf("xpt_scan_bus: xpt_create_path failed"
 5394                                        " with status %#x, bus scan halted\n",
 5395                                        status);
 5396                                 free(scan_info, M_TEMP);
 5397                                 request_ccb->ccb_h.status = status;
 5398                                 xpt_free_ccb(work_ccb);
 5399                                 xpt_done(request_ccb);
 5400                                 break;
 5401                         }
 5402                         work_ccb = xpt_alloc_ccb();
 5403                         xpt_setup_ccb(&work_ccb->ccb_h, path,
 5404                             request_ccb->ccb_h.pinfo.priority);
 5405                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5406                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5407                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5408                         work_ccb->crcn.flags = request_ccb->crcn.flags;
 5409                         xpt_action(work_ccb);
 5410                 }
 5411                 break;
 5412         }
 5413         case XPT_SCAN_LUN:
 5414         {
 5415                 cam_status status;
 5416                 struct cam_path *path;
 5417                 xpt_scan_bus_info *scan_info;
 5418                 path_id_t path_id;
 5419                 target_id_t target_id;
 5420                 lun_id_t lun_id;
 5421 
 5422                 /* Reuse the same CCB to query if a device was really found */
 5423                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
 5424                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
 5425                     request_ccb->ccb_h.pinfo.priority);
 5426                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 5427 
 5428                 path_id = request_ccb->ccb_h.path_id;
 5429                 target_id = request_ccb->ccb_h.target_id;
 5430                 lun_id = request_ccb->ccb_h.target_lun;
 5431                 xpt_action(request_ccb);
 5432 
 5433                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
 5434                         struct cam_ed *device;
 5435                         struct cam_et *target;
 5436                         int s, phl;
 5437 
 5438                         /*
 5439                          * If we already probed lun 0 successfully, or
 5440                          * we have additional configured luns on this
 5441                          * target that might have "gone away", go on
 5442                          * to the next lun.
 5443                          */
 5444                         target = request_ccb->ccb_h.path->target;
 5445                         /*
 5446                          * We may touch devices that we don't
 5447                          * hold references to, so ensure they
 5448                          * don't disappear out from under us.
 5449                          * The target above is referenced by the
 5450                          * path in the request ccb.
 5451                          */
 5452                         phl = 0;
 5453                         s = splcam();
 5454                         device = TAILQ_FIRST(&target->ed_entries);
 5455                         if (device != NULL) {
 5456                                 phl = CAN_SRCH_HI_SPARSE(device);
 5457                                 if (device->lun_id == 0)
 5458                                         device = TAILQ_NEXT(device, links);
 5459                         }
 5460                         splx(s);
 5461                         if ((lun_id != 0) || (device != NULL)) {
 5462                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
 5463                                         lun_id++;
 5464                         }
 5465                 } else {
 5466                         struct cam_ed *device;
 5467                         
 5468                         device = request_ccb->ccb_h.path->device;
 5469 
 5470                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
 5471                                 /* Try the next lun */
 5472                                 if (lun_id < (CAM_SCSI2_MAXLUN-1)
 5473                                   || CAN_SRCH_HI_DENSE(device))
 5474                                         lun_id++;
 5475                         }
 5476                 }
 5477 
 5478                 /*
 5479                  * Free the current request path; we're done with it.
 5480                  */
 5481                 xpt_free_path(request_ccb->ccb_h.path);
 5482 
 5483                 /*
 5484                  * Check to see if we should scan any further luns.
 5485                  */
 5486                 if (lun_id == request_ccb->ccb_h.target_lun
 5487                  || lun_id > scan_info->cpi->max_lun) {
 5488                         int done;
 5489 
 5490  hop_again:
 5491                         done = 0;
 5492                         if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
 5493                                 scan_info->counter++;
 5494                                 if (scan_info->counter == 
 5495                                     scan_info->cpi->initiator_id) {
 5496                                         scan_info->counter++;
 5497                                 }
 5498                                 if (scan_info->counter >=
 5499                                     scan_info->cpi->max_target+1) {
 5500                                         done = 1;
 5501                                 }
 5502                         } else {
 5503                                 scan_info->counter--;
 5504                                 if (scan_info->counter == 0) {
 5505                                         done = 1;
 5506                                 }
 5507                         }
 5508                         if (done) {
 5509                                 xpt_free_ccb(request_ccb);
 5510                                 xpt_free_ccb((union ccb *)scan_info->cpi);
 5511                                 request_ccb = scan_info->request_ccb;
 5512                                 free(scan_info, M_TEMP);
 5513                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
 5514                                 xpt_done(request_ccb);
 5515                                 break;
 5516                         }
 5517 
 5518                         if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
 5519                                 break;
 5520                         }
 5521                         status = xpt_create_path(&path, xpt_periph,
 5522                             scan_info->request_ccb->ccb_h.path_id,
 5523                             scan_info->counter, 0);
 5524                         if (status != CAM_REQ_CMP) {
 5525                                 printf("xpt_scan_bus: xpt_create_path failed"
 5526                                     " with status %#x, bus scan halted\n",
 5527                                     status);
 5528                                 xpt_free_ccb(request_ccb);
 5529                                 xpt_free_ccb((union ccb *)scan_info->cpi);
 5530                                 request_ccb = scan_info->request_ccb;
 5531                                 free(scan_info, M_TEMP);
 5532                                 request_ccb->ccb_h.status = status;
 5533                                 xpt_done(request_ccb);
 5534                                 break;
 5535                         }
 5536                         xpt_setup_ccb(&request_ccb->ccb_h, path,
 5537                             request_ccb->ccb_h.pinfo.priority);
 5538                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5539                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5540                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5541                         request_ccb->crcn.flags =
 5542                             scan_info->request_ccb->crcn.flags;
 5543                 } else {
 5544                         status = xpt_create_path(&path, xpt_periph,
 5545                             path_id, target_id, lun_id);
 5546                         if (status != CAM_REQ_CMP) {
 5547                                 printf("xpt_scan_bus: xpt_create_path failed "
 5548                                        "with status %#x, halting LUN scan\n",
 5549                                        status);
 5550                                 goto hop_again;
 5551                         }
 5552                         xpt_setup_ccb(&request_ccb->ccb_h, path,
 5553                               request_ccb->ccb_h.pinfo.priority);
 5554                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5555                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5556                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5557                         request_ccb->crcn.flags =
 5558                                 scan_info->request_ccb->crcn.flags;
 5559                 }
 5560                 xpt_action(request_ccb);
 5561                 break;
 5562         }
 5563         default:
 5564                 break;
 5565         }
 5566 }
 5567 
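/*
 * Editorial sketch (illustrative only, not part of the original source):
 * xpt_scan_bus() above is driven by CCBs rather than called directly.  A
 * full scan is typically kicked off by filling in an XPT_SCAN_BUS CCB for
 * a path on the bus and handing it to xpt_action(); the XPT_SCAN_LUN case
 * above then re-enters this routine as each per-LUN probe completes.
 *
 *	union ccb *ccb = xpt_alloc_ccb();
 *
 *	xpt_setup_ccb(&ccb->ccb_h, path, 5);	(priority 5 is illustrative)
 *	ccb->ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb->ccb_h.cbfcnp = my_scan_done;	(hypothetical callback)
 *	ccb->crcn.flags = CAM_FLAG_NONE;
 *	xpt_action(ccb);
 */
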
 5568 typedef enum {
 5569         PROBE_TUR,
 5570         PROBE_INQUIRY,
 5571         PROBE_FULL_INQUIRY,
 5572         PROBE_MODE_SENSE,
 5573         PROBE_SERIAL_NUM,
 5574         PROBE_TUR_FOR_NEGOTIATION
 5575 } probe_action;
 5576 
 5577 typedef enum {
 5578         PROBE_INQUIRY_CKSUM     = 0x01,
 5579         PROBE_SERIAL_CKSUM      = 0x02,
 5580         PROBE_NO_ANNOUNCE       = 0x04
 5581 } probe_flags;
 5582 
 5583 typedef struct {
 5584         TAILQ_HEAD(, ccb_hdr) request_ccbs;
 5585         probe_action    action;
 5586         union ccb       saved_ccb;
 5587         probe_flags     flags;
 5588         MD5_CTX         context;
 5589         u_int8_t        digest[16];
 5590 } probe_softc;
 5591 
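/*
 * Editorial note: probestart() and probedone() below walk probe_action
 * roughly in declaration order: PROBE_TUR (only for a previously
 * configured lun 0), PROBE_INQUIRY, PROBE_FULL_INQUIRY when the device
 * advertises more inquiry data, PROBE_MODE_SENSE when it supports tagged
 * queuing, PROBE_SERIAL_NUM, and finally PROBE_TUR_FOR_NEGOTIATION when
 * the device appears new or changed.
 */
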
 5592 static void
 5593 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
 5594              cam_flags flags, union ccb *request_ccb)
 5595 {
 5596         struct ccb_pathinq cpi;
 5597         cam_status status;
 5598         struct cam_path *new_path;
 5599         struct cam_periph *old_periph;
 5600         int s;
 5601         
 5602         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5603                   ("xpt_scan_lun\n"));
 5604         
 5605         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 5606         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5607         xpt_action((union ccb *)&cpi);
 5608 
 5609         if (cpi.ccb_h.status != CAM_REQ_CMP) {
 5610                 if (request_ccb != NULL) {
 5611                         request_ccb->ccb_h.status = cpi.ccb_h.status;
 5612                         xpt_done(request_ccb);
 5613                 }
 5614                 return;
 5615         }
 5616 
 5617         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5618                 /*
 5619                  * Can't scan the bus on an adapter that
 5620                  * cannot perform the initiator role.
 5621                  */
 5622                 if (request_ccb != NULL) {
 5623                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5624                         xpt_done(request_ccb);
 5625                 }
 5626                 return;
 5627         }
 5628 
 5629         if (request_ccb == NULL) {
 5630                 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
 5631                 if (request_ccb == NULL) {
 5632                         xpt_print_path(path);
 5633                         printf("xpt_scan_lun: can't allocate CCB, can't "
 5634                                "continue\n");
 5635                         return;
 5636                 }
 5637                 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
 5638                 if (new_path == NULL) {
 5639                         xpt_print_path(path);
 5640                         printf("xpt_scan_lun: can't allocate path, can't "
 5641                                "continue\n");
 5642                         free(request_ccb, M_TEMP);
 5643                         return;
 5644                 }
 5645                 status = xpt_compile_path(new_path, xpt_periph,
 5646                                           path->bus->path_id,
 5647                                           path->target->target_id,
 5648                                           path->device->lun_id);
 5649 
 5650                 if (status != CAM_REQ_CMP) {
 5651                         xpt_print_path(path);
 5652                         printf("xpt_scan_lun: can't compile path, can't "
 5653                                "continue\n");
 5654                         free(request_ccb, M_TEMP);
 5655                         free(new_path, M_TEMP);
 5656                         return;
 5657                 }
 5658                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
 5659                 request_ccb->ccb_h.cbfcnp = xptscandone;
 5660                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5661                 request_ccb->crcn.flags = flags;
 5662         }
 5663 
 5664         s = splsoftcam();
 5665         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
 5666                 probe_softc *softc;
 5667 
 5668                 softc = (probe_softc *)old_periph->softc;
 5669                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5670                                   periph_links.tqe);
 5671         } else {
 5672                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
 5673                                           probestart, "probe",
 5674                                           CAM_PERIPH_BIO,
 5675                                           request_ccb->ccb_h.path, NULL, 0,
 5676                                           request_ccb);
 5677 
 5678                 if (status != CAM_REQ_CMP) {
 5679                         xpt_print_path(path);
 5680                         printf("xpt_scan_lun: cam_periph_alloc returned an "
 5681                                "error, can't continue probe\n");
 5682                         request_ccb->ccb_h.status = status;
 5683                         xpt_done(request_ccb);
 5684                 }
 5685         }
 5686         splx(s);
 5687 }
 5688 
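/*
 * Editorial note: xpt_scan_lun() above either queues the request on an
 * existing "probe" periph for this path or allocates a new one via
 * cam_periph_alloc().  When called without a request CCB (e.g. from an
 * async rescan), it fabricates one whose completion routine,
 * xptscandone() below, simply frees the temporary path and CCB.
 */
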
 5689 static void
 5690 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
 5691 {
 5692         xpt_release_path(done_ccb->ccb_h.path);
 5693         free(done_ccb->ccb_h.path, M_TEMP);
 5694         free(done_ccb, M_TEMP);
 5695 }
 5696 
 5697 static cam_status
 5698 proberegister(struct cam_periph *periph, void *arg)
 5699 {
 5700         union ccb *request_ccb; /* CCB representing the probe request */
 5701         probe_softc *softc;
 5702 
 5703         request_ccb = (union ccb *)arg;
 5704         if (periph == NULL) {
 5705                 printf("proberegister: periph was NULL!!\n");
 5706                 return(CAM_REQ_CMP_ERR);
 5707         }
 5708 
 5709         if (request_ccb == NULL) {
 5710                 printf("proberegister: no probe CCB, "
 5711                        "can't register device\n");
 5712                 return(CAM_REQ_CMP_ERR);
 5713         }
 5714 
 5715         softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
 5716 
 5717         if (softc == NULL) {
 5718                 printf("proberegister: Unable to probe new device. "
 5719                        "Unable to allocate softc\n");                           
 5720                 return(CAM_REQ_CMP_ERR);
 5721         }
 5722         TAILQ_INIT(&softc->request_ccbs);
 5723         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5724                           periph_links.tqe);
 5725         softc->flags = 0;
 5726         periph->softc = softc;
 5727         cam_periph_acquire(periph);
 5728         /*
 5729          * Ensure we've waited at least a bus settle
 5730          * delay before attempting to probe the device.
 5731          * For HBAs that don't do bus resets, this won't make a difference.
 5732          */
 5733         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
 5734                                       scsi_delay);
 5735         probeschedule(periph);
 5736         return(CAM_REQ_CMP);
 5737 }
 5738 
 5739 static void
 5740 probeschedule(struct cam_periph *periph)
 5741 {
 5742         struct ccb_pathinq cpi;
 5743         union ccb *ccb;
 5744         probe_softc *softc;
 5745 
 5746         softc = (probe_softc *)periph->softc;
 5747         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 5748 
 5749         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
 5750         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5751         xpt_action((union ccb *)&cpi);
 5752 
 5753         /*
 5754          * If a device has gone away and another device, or the same one,
 5755          * is back in the same place, it should have a unit attention
 5756          * condition pending.  It will not report the unit attention in
 5757          * response to an inquiry, which may leave invalid transfer
 5758          * negotiations in effect.  The TUR will reveal the unit attention
 5759          * condition.  Only send the TUR for lun 0, since some devices 
 5760          * will get confused by commands other than inquiry to non-existent
 5761          * luns.  If you think a device has gone away, start your scan from
 5762          * lun 0.  This will ensure that any bogus transfer settings are
 5763          * invalidated.
 5764          *
 5765          * If we haven't seen the device before and the controller supports
 5766          * some kind of transfer negotiation, negotiate with the first
 5767          * command sent if no bus reset was performed at startup.  This
 5768          * ensures that the device is not confused by transfer negotiation
 5769          * settings left over by loader or BIOS action.
 5770          */
 5771         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 5772          && (ccb->ccb_h.target_lun == 0)) {
 5773                 softc->action = PROBE_TUR;
 5774         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
 5775               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
 5776                 proberequestdefaultnegotiation(periph);
 5777                 softc->action = PROBE_INQUIRY;
 5778         } else {
 5779                 softc->action = PROBE_INQUIRY;
 5780         }
 5781 
 5782         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
 5783                 softc->flags |= PROBE_NO_ANNOUNCE;
 5784         else
 5785                 softc->flags &= ~PROBE_NO_ANNOUNCE;
 5786 
 5787         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
 5788 }
 5789 
 5790 static void
 5791 probestart(struct cam_periph *periph, union ccb *start_ccb)
 5792 {
 5793         /* Probe the device that our peripheral driver points to */
 5794         struct ccb_scsiio *csio;
 5795         probe_softc *softc;
 5796 
 5797         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
 5798 
 5799         softc = (probe_softc *)periph->softc;
 5800         csio = &start_ccb->csio;
 5801 
 5802         switch (softc->action) {
 5803         case PROBE_TUR:
 5804         case PROBE_TUR_FOR_NEGOTIATION:
 5805         {
 5806                 scsi_test_unit_ready(csio,
 5807                                      /*retries*/4,
 5808                                      probedone,
 5809                                      MSG_SIMPLE_Q_TAG,
 5810                                      SSD_FULL_SIZE,
 5811                                      /*timeout*/60000);
 5812                 break;
 5813         }
 5814         case PROBE_INQUIRY:
 5815         case PROBE_FULL_INQUIRY:
 5816         {
 5817                 u_int inquiry_len;
 5818                 struct scsi_inquiry_data *inq_buf;
 5819 
 5820                 inq_buf = &periph->path->device->inq_data;
 5821                 /*
 5822                  * If the device is currently configured, we calculate an
 5823                  * MD5 checksum of the inquiry data, and if the serial number
 5824                  * length is greater than 0, add the serial number data
 5825                  * into the checksum as well.  Once the inquiry and the
 5826                  * serial number check finish, we attempt to figure out
 5827                  * whether we still have the same device.
 5828                  */
 5829                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
 5830                         
 5831                         MD5Init(&softc->context);
 5832                         MD5Update(&softc->context, (unsigned char *)inq_buf,
 5833                                   sizeof(struct scsi_inquiry_data));
 5834                         softc->flags |= PROBE_INQUIRY_CKSUM;
 5835                         if (periph->path->device->serial_num_len > 0) {
 5836                                 MD5Update(&softc->context,
 5837                                           periph->path->device->serial_num,
 5838                                           periph->path->device->serial_num_len);
 5839                                 softc->flags |= PROBE_SERIAL_CKSUM;
 5840                         }
 5841                         MD5Final(softc->digest, &softc->context);
 5842                 } 
 5843 
 5844                 if (softc->action == PROBE_INQUIRY)
 5845                         inquiry_len = SHORT_INQUIRY_LENGTH;
 5846                 else
 5847                         inquiry_len = inq_buf->additional_length
 5848                                     + offsetof(struct scsi_inquiry_data,
 5849                                                additional_length) + 1;
 5850 
 5851                 /*
 5852                  * Some parallel SCSI devices fail to send an
 5853                  * ignore wide residue message when dealing with
 5854                  * odd length inquiry requests.  Round up to be
 5855                  * safe.
 5856                  */
 5857                 inquiry_len = roundup2(inquiry_len, 2);
 5858         
 5859                 scsi_inquiry(csio,
 5860                              /*retries*/4,
 5861                              probedone,
 5862                              MSG_SIMPLE_Q_TAG,
 5863                              (u_int8_t *)inq_buf,
 5864                              inquiry_len,
 5865                              /*evpd*/FALSE,
 5866                              /*page_code*/0,
 5867                              SSD_MIN_SIZE,
 5868                              /*timeout*/60 * 1000);
 5869                 break;
 5870         }
 5871         case PROBE_MODE_SENSE:
 5872         {
 5873                 void  *mode_buf;
 5874                 int    mode_buf_len;
 5875 
 5876                 mode_buf_len = sizeof(struct scsi_mode_header_6)
 5877                              + sizeof(struct scsi_mode_blk_desc)
 5878                              + sizeof(struct scsi_control_page);
 5879                 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
 5880                 if (mode_buf != NULL) {
 5881                         scsi_mode_sense(csio,
 5882                                         /*retries*/4,
 5883                                         probedone,
 5884                                         MSG_SIMPLE_Q_TAG,
 5885                                         /*dbd*/FALSE,
 5886                                         SMS_PAGE_CTRL_CURRENT,
 5887                                         SMS_CONTROL_MODE_PAGE,
 5888                                         mode_buf,
 5889                                         mode_buf_len,
 5890                                         SSD_FULL_SIZE,
 5891                                         /*timeout*/60000);
 5892                         break;
 5893                 }
 5894                 xpt_print_path(periph->path);
 5895                 printf("Unable to mode sense control page - malloc failure\n");
 5896                 softc->action = PROBE_SERIAL_NUM;
 5897         }
 5898         /* FALLTHROUGH */
 5899         case PROBE_SERIAL_NUM:
 5900         {
 5901                 struct scsi_vpd_unit_serial_number *serial_buf;
 5902                 struct cam_ed* device;
 5903 
 5904                 serial_buf = NULL;
 5905                 device = periph->path->device;
 5906                 device->serial_num = NULL;
 5907                 device->serial_num_len = 0;
 5908 
 5909                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
 5910                         serial_buf = (struct scsi_vpd_unit_serial_number *)
 5911                                 malloc(sizeof(*serial_buf), M_TEMP,
 5912                                         M_NOWAIT | M_ZERO);
 5913 
 5914                 if (serial_buf != NULL) {
 5915                         scsi_inquiry(csio,
 5916                                      /*retries*/4,
 5917                                      probedone,
 5918                                      MSG_SIMPLE_Q_TAG,
 5919                                      (u_int8_t *)serial_buf,
 5920                                      sizeof(*serial_buf),
 5921                                      /*evpd*/TRUE,
 5922                                      SVPD_UNIT_SERIAL_NUMBER,
 5923                                      SSD_MIN_SIZE,
 5924                                      /*timeout*/60 * 1000);
 5925                         break;
 5926                 }
 5927                 /*
 5928                  * We'll have to do without; let our probedone
 5929                  * routine finish up for us.
 5930                  */
 5931                 start_ccb->csio.data_ptr = NULL;
 5932                 probedone(periph, start_ccb);
 5933                 return;
 5934         }
 5935         }
 5936         xpt_action(start_ccb);
 5937 }
 5938 
 5939 static void
 5940 proberequestdefaultnegotiation(struct cam_periph *periph)
 5941 {
 5942         struct ccb_trans_settings cts;
 5943 
 5944         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
 5945         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 5946 #ifdef CAM_NEW_TRAN_CODE
 5947         cts.type = CTS_TYPE_USER_SETTINGS;
 5948 #else /* CAM_NEW_TRAN_CODE */
 5949         cts.flags = CCB_TRANS_USER_SETTINGS;
 5950 #endif /* CAM_NEW_TRAN_CODE */
 5951         xpt_action((union ccb *)&cts);
 5952         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 5953 #ifdef CAM_NEW_TRAN_CODE
 5954         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 5955 #else /* CAM_NEW_TRAN_CODE */
 5956         cts.flags &= ~CCB_TRANS_USER_SETTINGS;
 5957         cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
 5958 #endif /* CAM_NEW_TRAN_CODE */
 5959         xpt_action((union ccb *)&cts);
 5960 }
 5961 
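/*
 * Editorial note: the GET-then-SET sequence above is the canonical way to
 * reset transfer negotiation: read the user (default) settings, then write
 * them back as the current settings, prompting the SIM to renegotiate with
 * the device on the next command it sends.
 */
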
 5962 static void
 5963 probedone(struct cam_periph *periph, union ccb *done_ccb)
 5964 {
 5965         probe_softc *softc;
 5966         struct cam_path *path;
 5967         u_int32_t  priority;
 5968 
 5969         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
 5970 
 5971         softc = (probe_softc *)periph->softc;
 5972         path = done_ccb->ccb_h.path;
 5973         priority = done_ccb->ccb_h.pinfo.priority;
 5974 
 5975         switch (softc->action) {
 5976         case PROBE_TUR:
 5977         {
 5978                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 5979 
 5980                         if (cam_periph_error(done_ccb, 0,
 5981                                              SF_NO_PRINT, NULL) == ERESTART)
 5982                                 return;
 5983                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 5984                                 /* Don't wedge the queue */
 5985                                 xpt_release_devq(done_ccb->ccb_h.path,
 5986                                                  /*count*/1,
 5987                                                  /*run_queue*/TRUE);
 5988                 }
 5989                 softc->action = PROBE_INQUIRY;
 5990                 xpt_release_ccb(done_ccb);
 5991                 xpt_schedule(periph, priority);
 5992                 return;
 5993         }
 5994         case PROBE_INQUIRY:
 5995         case PROBE_FULL_INQUIRY:
 5996         {
 5997                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 5998                         struct scsi_inquiry_data *inq_buf;
 5999                         u_int8_t periph_qual;
 6000 
 6001                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
 6002                         inq_buf = &path->device->inq_data;
 6003 
 6004                         periph_qual = SID_QUAL(inq_buf);
 6005                         
 6006                         switch(periph_qual) {
 6007                         case SID_QUAL_LU_CONNECTED:
 6008                         {
 6009                                 u_int8_t len;
 6010 
 6011                                 /*
 6012                                  * We conservatively request only
 6013                                  * SHORT_INQUIRY_LENGTH bytes of inquiry
 6014                                  * information during our first try
 6015                                  * at sending an INQUIRY. If the device
 6016                                  * has more information to give,
 6017                                  * perform a second request specifying
 6018                                  * the amount of information the device
 6019                                  * is willing to give.
 6020                                  */
 6021                                 len = inq_buf->additional_length
 6022                                     + offsetof(struct scsi_inquiry_data,
 6023                                                additional_length) + 1;
 6024                                 if (softc->action == PROBE_INQUIRY
 6025                                  && len > SHORT_INQUIRY_LENGTH) {
 6026                                         softc->action = PROBE_FULL_INQUIRY;
 6027                                         xpt_release_ccb(done_ccb);
 6028                                         xpt_schedule(periph, priority);
 6029                                         return;
 6030                                 }
 6031 
 6032                                 xpt_find_quirk(path->device);
 6033 
 6034 #ifdef CAM_NEW_TRAN_CODE
 6035                                 xpt_devise_transport(path);
 6036 #endif /* CAM_NEW_TRAN_CODE */
 6037                                 if (INQ_DATA_TQ_ENABLED(inq_buf))
 6038                                         softc->action = PROBE_MODE_SENSE;
 6039                                 else
 6040                                         softc->action = PROBE_SERIAL_NUM;
 6041 
 6042                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
 6043 
 6044                                 xpt_release_ccb(done_ccb);
 6045                                 xpt_schedule(periph, priority);
 6046                                 return;
 6047                         }
 6048                         default:
 6049                                 break;
 6050                         }
 6051                 } else if (cam_periph_error(done_ccb, 0,
 6052                                             done_ccb->ccb_h.target_lun > 0
 6053                                             ? SF_RETRY_UA|SF_QUIET_IR
 6054                                             : SF_RETRY_UA,
 6055                                             &softc->saved_ccb) == ERESTART) {
 6056                         return;
 6057                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6058                         /* Don't wedge the queue */
 6059                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6060                                          /*run_queue*/TRUE);
 6061                 }
 6062                 /*
 6063                  * If we get to this point, we got an error status back
 6064                  * from the inquiry and the error status doesn't require
 6065                  * automatically retrying the command.  Therefore, the
 6066                  * inquiry failed.  If we had inquiry information before
 6067                  * for this device, but this latest inquiry command failed,
 6068                  * the device has probably gone away.  If this device isn't
 6069                  * already marked unconfigured, notify the peripheral
 6070                  * drivers that this device is no more.
 6071                  */
 6072                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 6073                         /* Send the async notification. */
 6074                         xpt_async(AC_LOST_DEVICE, path, NULL);
 6075 
 6076                 xpt_release_ccb(done_ccb);
 6077                 break;
 6078         }
 6079         case PROBE_MODE_SENSE:
 6080         {
 6081                 struct ccb_scsiio *csio;
 6082                 struct scsi_mode_header_6 *mode_hdr;
 6083 
 6084                 csio = &done_ccb->csio;
 6085                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
 6086                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 6087                         struct scsi_control_page *page;
 6088                         u_int8_t *offset;
 6089 
 6090                         offset = ((u_int8_t *)&mode_hdr[1])
 6091                             + mode_hdr->blk_desc_len;
 6092                         page = (struct scsi_control_page *)offset;
 6093                         path->device->queue_flags = page->queue_flags;
 6094                 } else if (cam_periph_error(done_ccb, 0,
 6095                                             SF_RETRY_UA|SF_NO_PRINT,
 6096                                             &softc->saved_ccb) == ERESTART) {
 6097                         return;
 6098                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6099                         /* Don't wedge the queue */
 6100                         xpt_release_devq(done_ccb->ccb_h.path,
 6101                                          /*count*/1, /*run_queue*/TRUE);
 6102                 }
 6103                 xpt_release_ccb(done_ccb);
 6104                 free(mode_hdr, M_TEMP);
 6105                 softc->action = PROBE_SERIAL_NUM;
 6106                 xpt_schedule(periph, priority);
 6107                 return;
 6108         }
 6109         case PROBE_SERIAL_NUM:
 6110         {
 6111                 struct ccb_scsiio *csio;
 6112                 struct scsi_vpd_unit_serial_number *serial_buf;
 6113                 u_int32_t  priority;
 6114                 int changed;
 6115                 int have_serialnum;
 6116 
 6117                 changed = 1;
 6118                 have_serialnum = 0;
 6119                 csio = &done_ccb->csio;
 6120                 priority = done_ccb->ccb_h.pinfo.priority;
 6121                 serial_buf =
 6122                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
 6123 
 6124                 /* Clean up from previous instance of this device */
 6125                 if (path->device->serial_num != NULL) {
 6126                         free(path->device->serial_num, M_CAMXPT);
 6127                         path->device->serial_num = NULL;
 6128                         path->device->serial_num_len = 0;
 6129                 }
 6130 
 6131                 if (serial_buf == NULL) {
 6132                         /*
 6133                          * Don't process the command, as it was never sent.
 6134                          */
 6135                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
 6136                         && (serial_buf->length > 0)) {
 6137 
 6138                         have_serialnum = 1;
 6139                         path->device->serial_num =
 6140                                 (u_int8_t *)malloc((serial_buf->length + 1),
 6141                                                    M_CAMXPT, M_NOWAIT);
 6142                         if (path->device->serial_num != NULL) {
 6143                                 bcopy(serial_buf->serial_num,
 6144                                       path->device->serial_num,
 6145                                       serial_buf->length);
 6146                                 path->device->serial_num_len =
 6147                                     serial_buf->length;
 6148                                 path->device->serial_num[serial_buf->length]
 6149                                     = '\0';
 6150                         }
 6151                 } else if (cam_periph_error(done_ccb, 0,
 6152                                             SF_RETRY_UA|SF_NO_PRINT,
 6153                                             &softc->saved_ccb) == ERESTART) {
 6154                         return;
 6155                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6156                         /* Don't wedge the queue */
 6157                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6158                                          /*run_queue*/TRUE);
 6159                 }
 6160                 
 6161                 /*
 6162                  * Let's see if we have seen this device before.
 6163                  */
 6164                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
 6165                         MD5_CTX context;
 6166                         u_int8_t digest[16];
 6167 
 6168                         MD5Init(&context);
 6169                         
 6170                         MD5Update(&context,
 6171                                   (unsigned char *)&path->device->inq_data,
 6172                                   sizeof(struct scsi_inquiry_data));
 6173 
 6174                         if (have_serialnum)
 6175                                 MD5Update(&context, serial_buf->serial_num,
 6176                                           serial_buf->length);
 6177 
 6178                         MD5Final(digest, &context);
 6179                         if (bcmp(softc->digest, digest, 16) == 0)
 6180                                 changed = 0;
 6181 
 6182                         /*
 6183                          * XXX Do we need to do a TUR in order to ensure
 6184                          *     that the device really hasn't changed???
 6185                          */
 6186                         if ((changed != 0)
 6187                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
 6188                                 xpt_async(AC_LOST_DEVICE, path, NULL);
 6189                 }
 6190                 if (serial_buf != NULL)
 6191                         free(serial_buf, M_TEMP);
 6192 
 6193                 if (changed != 0) {
 6194                         /*
 6195                          * Now we have all the information necessary to
 6196                          * safely perform transfer negotiation.
 6197                          * Controllers don't perform any negotiation or
 6198                          * tagged queuing until after the first
 6199                          * XPT_SET_TRAN_SETTINGS ccb is
 6200                          * received.  So, on a new device, just retrieve
 6201                          * the user settings, and set them as the current
 6202                          * settings to set the device up.
 6203                          */
 6204                         proberequestdefaultnegotiation(periph);
 6205                         xpt_release_ccb(done_ccb);
 6206 
 6207                         /*
 6208                          * Perform a TUR to allow the controller to
 6209                          * perform any necessary transfer negotiation.
 6210                          */
 6211                         softc->action = PROBE_TUR_FOR_NEGOTIATION;
 6212                         xpt_schedule(periph, priority);
 6213                         return;
 6214                 }
 6215                 xpt_release_ccb(done_ccb);
 6216                 break;
 6217         }
 6218         case PROBE_TUR_FOR_NEGOTIATION:
 6219                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6220                         /* Don't wedge the queue */
 6221                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6222                                          /*run_queue*/TRUE);
 6223                 }
 6224 
 6225                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
 6226 
 6227                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
 6228                         /* Inform the XPT that a new device has been found */
 6229                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 6230                         xpt_action(done_ccb);
 6231 
 6232                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
 6233                                   done_ccb);
 6234                 }
 6235                 xpt_release_ccb(done_ccb);
 6236                 break;
 6237         }
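        /*
         * The probe sequence for this CCB is finished: hand the completed
         * request back to its issuer, then either start the next queued
         * probe request or, if none remain, tear down this probe periph.
         */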
 6238         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 6239         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
 6240         done_ccb->ccb_h.status = CAM_REQ_CMP;
 6241         xpt_done(done_ccb);
 6242         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
 6243                 cam_periph_invalidate(periph);
 6244                 cam_periph_release(periph);
 6245         } else {
 6246                 probeschedule(periph);
 6247         }
 6248 }
 6249 
 6250 static void
 6251 probecleanup(struct cam_periph *periph)
 6252 {
 6253         free(periph->softc, M_TEMP);
 6254 }
 6255 
 6256 static void
 6257 xpt_find_quirk(struct cam_ed *device)
 6258 {
 6259         caddr_t match;
 6260 
 6261         match = cam_quirkmatch((caddr_t)&device->inq_data,
 6262                                (caddr_t)xpt_quirk_table,
 6263                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
 6264                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
 6265 
 6266         if (match == NULL)
 6267                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
 6268 
 6269         device->quirk = (struct xpt_quirk_entry *)match;
 6270 }
 6271 
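/*
 * Editorial note: xpt_quirk_table (defined earlier in this file) is
 * expected to end with a wildcard entry that matches any inquiry data,
 * which is why a NULL result from cam_quirkmatch() is treated as a fatal
 * inconsistency rather than as "no quirks".
 */
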
 6272 static int
 6273 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
 6274 {
 6275         int error, val;
 6276 
 6277         val = cam_srch_hi;
 6278         error = sysctl_handle_int(oidp, &val, sizeof(val), req);
 6279         if (error != 0 || req->newptr == NULL)
 6280                 return (error);
 6281         if (val == 0 || val == 1) {
 6282                 cam_srch_hi = val;
 6283                 return (0);
 6284         } else {
 6285                 return (EINVAL);
 6286         }
 6287 }
 6288 
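/*
 * Editorial note (usage example, assuming this handler is attached under
 * kern.cam as cam_srch_hi elsewhere in this file): the knob accepts only
 * 0 or 1, e.g. from userland:
 *
 *	sysctl kern.cam.cam_srch_hi=1
 */
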
 6289 #ifdef CAM_NEW_TRAN_CODE
 6290 
 6291 static void
 6292 xpt_devise_transport(struct cam_path *path)
 6293 {
 6294         struct ccb_pathinq cpi;
 6295         struct ccb_trans_settings cts;
 6296         struct scsi_inquiry_data *inq_buf;
 6297 
 6298         /* Get transport information from the SIM */
 6299         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 6300         cpi.ccb_h.func_code = XPT_PATH_INQ;
 6301         xpt_action((union ccb *)&cpi);
 6302 
 6303         inq_buf = NULL;
 6304         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
 6305                 inq_buf = &path->device->inq_data;
 6306         path->device->protocol = PROTO_SCSI;
 6307         path->device->protocol_version =
 6308             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
 6309         path->device->transport = cpi.transport;
 6310         path->device->transport_version = cpi.transport_version;
 6311 
 6312         /*
 6313          * Any device not using SPI3 features should
 6314          * be considered SPI2 or lower.
 6315          */
 6316         if (inq_buf != NULL) {
 6317                 if (path->device->transport == XPORT_SPI
 6318                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
 6319                  && path->device->transport_version > 2)
 6320                         path->device->transport_version = 2;
 6321         } else {
 6322                 struct cam_ed* otherdev;
 6323 
 6324                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
 6325                      otherdev != NULL;
 6326                      otherdev = TAILQ_NEXT(otherdev, links)) {
 6327                         if (otherdev != path->device)
 6328                                 break;
 6329                 }
 6330                     
 6331                 if (otherdev != NULL) {
 6332                         /*
 6333                          * Initially assume the same versioning as
 6334                          * prior luns for this target.
 6335                          */
 6336                         path->device->protocol_version =
 6337                             otherdev->protocol_version;
 6338                         path->device->transport_version =
 6339                             otherdev->transport_version;
 6340                 } else {
 6341                         /* Until we know better, opt for safety */
 6342                         path->device->protocol_version = 2;
 6343                         if (path->device->transport == XPORT_SPI)
 6344                                 path->device->transport_version = 2;
 6345                         else
 6346                                 path->device->transport_version = 0;
 6347                 }
 6348         }
 6349 
 6350         /*
 6351          * XXX
 6352          * For a device compliant with SPC-2 we should be able
 6353          * to determine the transport version supported by
 6354          * scrutinizing the version descriptors in the
 6355          * inquiry buffer.
 6356          */
 6357 
 6358         /* Tell the controller what we think */
 6359         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 6360         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 6361         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 6362         cts.transport = path->device->transport;
 6363         cts.transport_version = path->device->transport_version;
 6364         cts.protocol = path->device->protocol;
 6365         cts.protocol_version = path->device->protocol_version;
 6366         cts.proto_specific.valid = 0;
 6367         cts.xport_specific.valid = 0;
 6368         xpt_action((union ccb *)&cts);
 6369 }
 6370 
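/*
 * Editorial note (worked example): a parallel SCSI device whose inquiry
 * data sets none of the SPI3 feature bits (SID_SPI_MASK) is clamped above
 * to transport_version 2, so later negotiation treats it as SPI2 or lower
 * regardless of what the SIM reported for the bus.
 */
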
 6371 static void
 6372 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
 6373                           int async_update)
 6374 {
 6375         struct  ccb_pathinq cpi;
 6376         struct  ccb_trans_settings cur_cts;
 6377         struct  ccb_trans_settings_scsi *scsi;
 6378         struct  ccb_trans_settings_scsi *cur_scsi;
 6379         struct  cam_sim *sim;
 6380         struct  scsi_inquiry_data *inq_data;
 6381 
 6382         if (device == NULL) {
 6383                 cts->ccb_h.status = CAM_PATH_INVALID;
 6384                 xpt_done((union ccb *)cts);
 6385                 return;
 6386         }
 6387 
 6388         if (cts->protocol == PROTO_UNKNOWN
 6389          || cts->protocol == PROTO_UNSPECIFIED) {
 6390                 cts->protocol = device->protocol;
 6391                 cts->protocol_version = device->protocol_version;
 6392         }
 6393 
 6394         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
 6395          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
 6396                 cts->protocol_version = device->protocol_version;
 6397 
 6398         if (cts->protocol != device->protocol) {
 6399                 xpt_print_path(cts->ccb_h.path);
 6400                 printf("Uninitialized Protocol %x:%x?\n",
 6401                        cts->protocol, device->protocol);
 6402                 cts->protocol = device->protocol;
 6403         }
 6404 
 6405         if (cts->protocol_version > device->protocol_version) {
 6406                 if (bootverbose) {
 6407                         xpt_print_path(cts->ccb_h.path);
 6408                         printf("Down revving Protocol Version from %d to %d?\n",
 6409                                cts->protocol_version, device->protocol_version);
 6410                 }
 6411                 cts->protocol_version = device->protocol_version;
 6412         }
 6413 
 6414         if (cts->transport == XPORT_UNKNOWN
 6415          || cts->transport == XPORT_UNSPECIFIED) {
 6416                 cts->transport = device->transport;
 6417                 cts->transport_version = device->transport_version;
 6418         }
 6419 
 6420         if (cts->transport_version == XPORT_VERSION_UNKNOWN
 6421          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
 6422                 cts->transport_version = device->transport_version;
 6423 
 6424         if (cts->transport != device->transport) {
 6425                 xpt_print_path(cts->ccb_h.path);
 6426                 printf("Uninitialized Transport %x:%x?\n",
 6427                        cts->transport, device->transport);
 6428                 cts->transport = device->transport;
 6429         }
 6430 
 6431         if (cts->transport_version > device->transport_version) {
 6432                 if (bootverbose) {
 6433                         xpt_print_path(cts->ccb_h.path);
 6434                         printf("Down revving Transport Version from %d to %d?\n",
 6435                                cts->transport_version,
 6436                                device->transport_version);
 6437                 }
 6438                 cts->transport_version = device->transport_version;
 6439         }
 6440 
 6441         sim = cts->ccb_h.path->bus->sim;
 6442 
 6443         /*
 6444          * Nothing more of interest to do unless
 6445          * this is a device connected via the
 6446          * SCSI protocol.
 6447          */
 6448         if (cts->protocol != PROTO_SCSI) {
 6449                 if (async_update == FALSE) 
 6450                         (*(sim->sim_action))(sim, (union ccb *)cts);
 6451                 return;
 6452         }
 6453 
 6454         inq_data = &device->inq_data;
 6455         scsi = &cts->proto_specific.scsi;
 6456         xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
 6457         cpi.ccb_h.func_code = XPT_PATH_INQ;
 6458         xpt_action((union ccb *)&cpi);
 6459 
 6460         /* SCSI specific sanity checking */
 6461         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
 6462          || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
 6463          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
 6464          || (device->quirk->mintags == 0)) {
 6465                 /*
 6466                  * Can't tag on hardware that doesn't support tags,
 6467                  * doesn't have it enabled, or has broken tag support.
 6468                  */
 6469                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6470         }
 6471 
 6472         if (async_update == FALSE) {
 6473                 /*
 6474                  * Perform sanity checking against what the
 6475                  * controller and device can do.
 6476                  */
 6477                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
 6478                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 6479                 cur_cts.type = cts->type;
 6480                 xpt_action((union ccb *)&cur_cts);
 6481 
 6482                 cur_scsi = &cur_cts.proto_specific.scsi;
 6483                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
 6484                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6485                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
 6486                 }
 6487                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
 6488                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6489         }
 6490 
 6491         /* SPI specific sanity checking */
 6492         if (cts->transport == XPORT_SPI && async_update == FALSE) {
 6493                 u_int spi3caps;
 6494                 struct ccb_trans_settings_spi *spi;
 6495                 struct ccb_trans_settings_spi *cur_spi;
 6496 
 6497                 spi = &cts->xport_specific.spi;
 6498 
 6499                 cur_spi = &cur_cts.xport_specific.spi;
 6500 
 6501                 /* Fill in any gaps in what the user gave us */
 6502                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
 6503                         spi->sync_period = cur_spi->sync_period;
 6504                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
 6505                         spi->sync_period = 0;
 6506                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
 6507                         spi->sync_offset = cur_spi->sync_offset;
 6508                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
 6509                         spi->sync_offset = 0;
 6510                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
 6511                         spi->ppr_options = cur_spi->ppr_options;
 6512                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
 6513                         spi->ppr_options = 0;
 6514                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
 6515                         spi->bus_width = cur_spi->bus_width;
 6516                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
 6517                         spi->bus_width = 0;
 6518                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
 6519                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
 6520                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
 6521                 }
 6522                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
 6523                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
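/*
 * Illustrative sketch (not from the original source): each numeric
 * "fill in the gaps" pair above follows one rule per field, which could
 * be written as a macro such as the hypothetical one below.
 */
#if 0
#define SPI_MERGE_FIELD(spi, cur_spi, vbit, field) do {                   \
        if (((spi)->valid & (vbit)) == 0)     /* caller gave no value */  \
                (spi)->field = (cur_spi)->field;                          \
        if (((cur_spi)->valid & (vbit)) == 0) /* no current value either */ \
                (spi)->field = 0;                                         \
} while (0)
#endif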
 6524                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6525                   && (inq_data->flags & SID_Sync) == 0
 6526                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
 6527                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
 6528                  || (cur_spi->sync_offset == 0)
 6529                  || (cur_spi->sync_period == 0)) {
 6530                         /* Force async */
 6531                         spi->sync_period = 0;
 6532                         spi->sync_offset = 0;
 6533                 }
 6534 
 6535                 switch (spi->bus_width) {
 6536                 case MSG_EXT_WDTR_BUS_32_BIT:
 6537                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6538                           || (inq_data->flags & SID_WBus32) != 0
 6539                           || cts->type == CTS_TYPE_USER_SETTINGS)
 6540                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
 6541                                 break;
 6542                         /* Fall Through to 16-bit */
 6543                 case MSG_EXT_WDTR_BUS_16_BIT:
 6544                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6545                           || (inq_data->flags & SID_WBus16) != 0
 6546                           || cts->type == CTS_TYPE_USER_SETTINGS)
 6547                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
 6548                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
 6549                                 break;
 6550                         }
 6551                         /* Fall Through to 8-bit */
 6552                 default: /* New bus width?? */
 6553                 case MSG_EXT_WDTR_BUS_8_BIT:
 6554                         /* All targets can do this */
 6555                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
 6556                         break;
 6557                 }
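/*
 * Illustrative sketch (not from the original source): the fall-through
 * switch above steps the requested width down until both the HBA and, for
 * current settings, the device support it; 8-bit always succeeds since it
 * is every target's baseline.  The helper below is hypothetical.
 */
#if 0
static u_int
spi_clamp_width(u_int req, int hba_w32, int hba_w16, int dev_w32, int dev_w16)
{
        if (req == MSG_EXT_WDTR_BUS_32_BIT && hba_w32 && dev_w32)
                return (MSG_EXT_WDTR_BUS_32_BIT);
        if (req >= MSG_EXT_WDTR_BUS_16_BIT && hba_w16 && dev_w16)
                return (MSG_EXT_WDTR_BUS_16_BIT);
        return (MSG_EXT_WDTR_BUS_8_BIT);
}
#endif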
 6558 
 6559                 spi3caps = cpi.xport_specific.spi.ppr_options;
 6560                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6561                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
 6562                         spi3caps &= inq_data->spi3data;
 6563 
 6564                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
 6565                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
 6566 
 6567                 if ((spi3caps & SID_SPI_IUS) == 0)
 6568                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
 6569 
 6570                 if ((spi3caps & SID_SPI_QAS) == 0)
 6571                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
 6572 
 6573         /* PPR options (DT, IU, QAS) are only defined for wide buses */
 6574                 if (spi->bus_width == 0)
 6575                         spi->ppr_options = 0;
 6576 
 6577                 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
 6578                         /*
 6579                          * Can't tag queue without disconnection.
 6580                          */
 6581                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6582                         scsi->valid |= CTS_SCSI_VALID_TQ;
 6583                 }
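/*
 * (Tagged queueing only pays off when the target can disconnect from the
 * bus between commands; with disconnection disabled at most one command
 * can be outstanding, so tags are forced off above.)
 */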
 6584 
 6585                 /*
 6586                  * If we are currently performing tagged transactions to
 6587                  * this device and want to change its negotiation parameters,
 6588                  * go non-tagged for a bit to give the controller a chance to
 6589                  * negotiate unhampered by tag messages.
 6590                  */
 6591                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
 6592                  && (device->inq_flags & SID_CmdQue) != 0
 6593                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
 6594                  && (spi->valid & (CTS_SPI_VALID_SYNC_RATE|
 6595                                    CTS_SPI_VALID_SYNC_OFFSET|
 6596                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
 6597                         xpt_toggle_tags(cts->ccb_h.path);
 6598         }
 6599 
 6600         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
 6601          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
 6602                 int device_tagenb;
 6603 
 6604                 /*
 6605                  * If we are transitioning from tags to no-tags or
 6606                  * vice-versa, we need to carefully freeze and restart
 6607                  * the queue so that we don't overlap tagged and non-tagged
 6608                  * commands.  We also temporarily stop tags if there is
 6609                  * a change in transfer negotiation settings to allow
 6610                  * "tag-less" negotiation.
 6611                  */
 6612                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6613                  || (device->inq_flags & SID_CmdQue) != 0)
 6614                         device_tagenb = TRUE;
 6615                 else
 6616                         device_tagenb = FALSE;
 6617 
 6618                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
 6619                   && device_tagenb == FALSE)
 6620                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
 6621                   && device_tagenb == TRUE)) {
 6622 
 6623                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
 6624                                 /*
 6625                                  * Delay change to use tags until after a
 6626                                  * few commands have gone to this device so
 6627                                  * the controller has time to perform transfer
 6628                                  * negotiations without tagged messages getting
 6629                                  * in the way.
 6630                                  */
 6631                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
 6632                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
 6633                         } else {
 6634                                 struct ccb_relsim crs;
 6635 
 6636                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
 6637                                 device->inq_flags &= ~SID_CmdQue;
 6638                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
 6639                                                     sim->max_dev_openings);
 6640                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6641                                 device->tag_delay_count = 0;
 6642 
 6643                                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
 6644                                               /*priority*/1);
 6645                                 crs.ccb_h.func_code = XPT_REL_SIMQ;
 6646                                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6647                                 crs.openings
 6648                                     = crs.release_timeout 
 6649                                     = crs.qfrozen_cnt
 6650                                     = 0;
 6651                                 xpt_action((union ccb *)&crs);
 6652                         }
 6653                 }
 6654         }
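/*
 * (Summary of the tag tear-down path above: freeze the device queue so no
 * new CCBs are dispatched, clear SID_CmdQue so no further tagged messages
 * are built, resize the device's CCB queue, and finally issue XPT_REL_SIMQ
 * with RELSIM_RELEASE_AFTER_QEMPTY so the queue is only thawed once the
 * in-flight commands have drained.)
 */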
 6655         if (async_update == FALSE) 
 6656                 (*(sim->sim_action))(sim, (union ccb *)cts);
 6657 }
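/*
 * Illustrative sketch (not from the original source): a typical caller,
 * e.g. a peripheral driver, reaches this function by building an
 * XPT_SET_TRAN_SETTINGS CCB and passing it to xpt_action().  The "path"
 * variable below is assumed to be an existing struct cam_path pointer;
 * the usual pattern is to read the current settings, modify them, and
 * write them back.
 */
#if 0
struct ccb_trans_settings cts;

xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
cts.type = CTS_TYPE_CURRENT_SETTINGS;
xpt_action((union ccb *)&cts);              /* read current settings... */

cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
cts.proto_specific.scsi.flags = 0;          /* ...then ask for tags off */
xpt_action((union ccb *)&cts);
#endif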
 6658 
 6659 #else /* CAM_NEW_TRAN_CODE */
 6660 
 6661 static void
 6662 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
 6663                           int async_update)
 6664 {
 6665         struct  cam_sim *sim;
 6666         int     qfrozen;
 6667 
 6668         sim = cts->ccb_h.path->bus->sim;
 6669         if (async_update == FALSE) {
 6670                 struct  scsi_inquiry_data *inq_data;
 6671                 struct  ccb_pathinq cpi;
 6672                 struct  ccb_trans_settings cur_cts;
 6673 
 6674                 if (device == NULL) {
 6675                         cts->ccb_h.status = CAM_PATH_INVALID;
 6676                         xpt_done((union ccb *)cts);
 6677                         return;
 6678                 }
 6679 
 6680                 /*
 6681                  * Perform sanity checking against what the
 6682                  * controller and device can do.
 6683                  */
 6684                 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
 6685                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 6686                 xpt_action((union ccb *)&cpi);
 6687                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
 6688                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 6689                 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
 6690                 xpt_action((union ccb *)&cur_cts);
 6691                 inq_data = &device->inq_data;
 6692 
 6693                 /* Fill in any gaps in what the user gave us */
 6694                 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
 6695                         cts->sync_period = cur_cts.sync_period;
 6696                 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
 6697                         cts->sync_offset = cur_cts.sync_offset;
 6698                 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
 6699                         cts->bus_width = cur_cts.bus_width;
 6700                 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
 6701                         cts->flags &= ~CCB_TRANS_DISC_ENB;
 6702                         cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
 6703                 }
 6704                 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
 6705                         cts->flags &= ~CCB_TRANS_TAG_ENB;
 6706                         cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
 6707                 }
 6708 
 6709                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6710                   && (inq_data->flags & SID_Sync) == 0)
 6711                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
 6712                  || (cts->sync_offset == 0)
 6713                  || (cts->sync_period == 0)) {
 6714                         /* Force async */
 6715                         cts->sync_period = 0;
 6716                         cts->sync_offset = 0;
 6717                 } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6718                         && (inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
 6719                         && cts->sync_period <= 0x9) {
 6720                         /*
 6721                          * Don't allow DT transmission rates if the
 6722                          * device does not support it.
 6723                          */
 6724                         cts->sync_period = 0xa;
 6725                 }
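/*
 * (SPI sync period factors of 0x9 and below denote double-transition
 * clocking -- 0x9 is the 12.5ns Ultra160 rate -- while 0xa is the 25ns
 * Ultra2 rate, the fastest single-transition setting; hence the clamp
 * above for devices whose INQUIRY data lacks SID_SPI_CLOCK_DT.)
 */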
 6726 
 6727                 switch (cts->bus_width) {
 6728                 case MSG_EXT_WDTR_BUS_32_BIT:
 6729                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6730                           || (inq_data->flags & SID_WBus32) != 0)
 6731                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
 6732                                 break;
 6733                         /* FALLTHROUGH to 16-bit */
 6734                 case MSG_EXT_WDTR_BUS_16_BIT:
 6735                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6736                           || (inq_data->flags & SID_WBus16) != 0)
 6737                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
 6738                                 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
 6739                                 break;
 6740                         }
 6741                         /* FALLTHROUGH to 8-bit */
 6742                 default: /* New bus width?? */
 6743                 case MSG_EXT_WDTR_BUS_8_BIT:
 6744                         /* All targets can do this */
 6745                         cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
 6746                         break;
 6747                 }
 6748 
 6749                 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
 6750                         /*
 6751                          * Can't tag queue without disconnection.
 6752                          */
 6753                         cts->flags &= ~CCB_TRANS_TAG_ENB;
 6754                         cts->valid |= CCB_TRANS_TQ_VALID;
 6755                 }
 6756 
 6757                 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
 6758                  || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
 6759                  || (device->queue_flags & SCP_QUEUE_DQUE) != 0
 6760                  || (device->quirk->mintags == 0)) {
 6761                         /*
 6762                  * Can't tag on hardware that doesn't support tags,
 6763                          * doesn't have it enabled, or has broken tag support.
 6764                          */
 6765                         cts->flags &= ~CCB_TRANS_TAG_ENB;
 6766                 }
 6767         }
 6768 
 6769         qfrozen = FALSE;
 6770         if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
 6771                 int device_tagenb;
 6772 
 6773                 /*
 6774                  * If we are transitioning from tags to no-tags or
 6775                  * vice-versa, we need to carefully freeze and restart
 6776                  * the queue so that we don't overlap tagged and non-tagged
 6777                  * commands.  We also temporarily stop tags if there is
 6778                  * a change in transfer negotiation settings to allow
 6779                  * "tag-less" negotiation.
 6780                  */