FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c

/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.3/sys/cam/cam_xpt.c 132199 2004-07-15 08:26:07Z phk $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
        SLIST_ENTRY(async_node) links;
        u_int32_t       event_enable;   /* Async Event enables */
        void            (*callback)(void *arg, u_int32_t code,
                                    struct cam_path *path, void *args);
        void            *callback_arg;
};
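
/*
 * Peripheral drivers do not touch these lists directly; they hang an
 * async_node off a device by issuing an XPT_SASYNC_CB CCB.  A minimal
 * sketch, assuming the usual struct ccb_setasync from cam_ccb.h and a
 * hypothetical daasync() callback:
 *
 *      struct ccb_setasync csa;
 *
 *      xpt_setup_ccb(&csa.ccb_h, periph->path, 5);
 *      csa.ccb_h.func_code = XPT_SASYNC_CB;
 *      csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *      csa.callback = daasync;
 *      csa.callback_arg = periph;
 *      xpt_action((union ccb *)&csa);
 */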

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
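
/*
 * Because of the #ifndef guard above, this limit can be raised at build
 * time.  Assuming CAM_MAX_HIGHPOWER is plumbed through opt_cam.h (see
 * sys/conf/options), a kernel config line such as the following would
 * override the default of 4:
 *
 *      options CAM_MAX_HIGHPOWER=8
 */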

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
        cam_pinfo pinfo;
        struct    cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
        TAILQ_ENTRY(cam_ed) links;
        struct  cam_ed_qinfo alloc_ccb_entry;
        struct  cam_ed_qinfo send_ccb_entry;
        struct  cam_et   *target;
        lun_id_t         lun_id;
        struct  camq drvq;              /*
                                         * Queue of type drivers wanting to do
                                         * work on this device.
                                         */
        struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
        struct  async_list asyncs;      /* Async callback info for this B/T/L */
        struct  periph_list periphs;    /* All attached devices */
        u_int   generation;             /* Generation number */
        struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
        struct  xpt_quirk_entry *quirk; /* Oddities about this device */
#ifdef CAM_NEW_TRAN_CODE
        cam_proto        protocol;
        u_int            protocol_version;
        cam_xport        transport;
        u_int            transport_version;
#endif /* CAM_NEW_TRAN_CODE */
        /* Storage for the inquiry data */
        struct           scsi_inquiry_data inq_data;
        u_int8_t         inq_flags;     /*
                                         * Current settings for inquiry flags.
                                         * This allows us to override settings
                                         * like disconnection and tagged
                                         * queuing for a device.
                                         */
        u_int8_t         queue_flags;   /* Queue flags from the control page */
        u_int8_t         serial_num_len;
        u_int8_t        *serial_num;
        u_int32_t        qfrozen_cnt;
        u_int32_t        flags;
#define CAM_DEV_UNCONFIGURED            0x01
#define CAM_DEV_REL_TIMEOUT_PENDING     0x02
#define CAM_DEV_REL_ON_COMPLETE         0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
#define CAM_DEV_TAG_AFTER_COUNT         0x20
#define CAM_DEV_INQUIRY_DATA_VALID      0x40
        u_int32_t        tag_delay_count;
#define CAM_TAG_DELAY_COUNT             5
        u_int32_t        refcount;
        struct           callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
        TAILQ_HEAD(, cam_ed) ed_entries;
        TAILQ_ENTRY(cam_et) links;
        struct  cam_eb  *bus;
        target_id_t     target_id;
        u_int32_t       refcount;
        u_int           generation;
        struct          timeval last_reset;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
        TAILQ_HEAD(, cam_et) et_entries;
        TAILQ_ENTRY(cam_eb)  links;
        path_id_t            path_id;
        struct cam_sim       *sim;
        struct timeval       last_reset;
        u_int32_t            flags;
#define CAM_EB_RUNQ_SCHEDULED   0x01
        u_int32_t            refcount;
        u_int                generation;
};
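
/*
 * The relationship between the structures above, sketched:
 *
 *      xpt_busses                      list of registered busses
 *          cam_eb (bus)                one entry per xpt_bus_register()
 *              cam_et (target)         created on successful target probe
 *                  cam_ed (device/LUN) the per-device EDT entry
 *                      periph_list     peripheral drivers attached to it
 */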

struct cam_path {
        struct cam_periph *periph;
        struct cam_eb     *bus;
        struct cam_et     *target;
        struct cam_ed     *device;
};
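
/*
 * A path names one bus/target/lun nexus.  Consumers normally build one
 * with xpt_create_path() and drop it with xpt_free_path(), as xptioctl()
 * does below.  A minimal sketch (error handling elided):
 *
 *      struct cam_path *path;
 *
 *      if (xpt_create_path(&path, xpt_periph, path_id, target_id,
 *                          lun_id) == CAM_REQ_CMP) {
 *              ... issue CCBs against path ...
 *              xpt_free_path(path);
 *      }
 */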

struct xpt_quirk_entry {
        struct scsi_inquiry_pattern inq_pat;
        u_int8_t quirks;
#define CAM_QUIRK_NOLUNS        0x01
#define CAM_QUIRK_NOSERIAL      0x02
#define CAM_QUIRK_HILUNS        0x04
        u_int mintags;
        u_int maxtags;
};
#define CAM_SCSI2_MAXLUN        8

typedef enum {
        XPT_FLAG_OPEN           = 0x01
} xpt_flags;

struct xpt_softc {
        xpt_flags       flags;
        u_int32_t       generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";
static struct xpt_quirk_entry xpt_quirk_table[] =
{
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Unfortunately, the Quantum Atlas III has the same
                 * problem as the Atlas II drives above.
                 * Reported by: "Johan Granlund" <johan@granlund.nu>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM39100TD-SW N1B0
                 *
                 * It's possible that Quantum will fix the problem in later
                 * firmware revisions.  If that happens, the quirk entry
                 * will need to be made specific to the firmware revisions
                 * with the problem.
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * 18 Gig Atlas III, same problem as the 9G version.
                 * Reported by: Andre Albsmeier
                 *              <andre.albsmeier@mchp.siemens.de>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM318000TD-S N491
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * Broken tagged queuing drive
                 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
                 *         and: Martin Renters <martin@tdc.on.ca>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        /*
         * The Seagate Medalist Pro drives have very poor write
         * performance with anything more than 2 tags.
         *
         * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
         * Drive:  <SEAGATE ST36530N 1444>
         *
         * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
         * Drive:  <SEAGATE ST34520W 1281>
         *
         * No one has actually reported that the 9G version
         * (ST39140*) of the Medalist Pro has the same problem, but
         * we're assuming that it does because the 4G and 6.5G
         * versions of the drive are broken.
         */
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  Write performance
                 * steadily drops off with more and more concurrent
                 * transactions.  Best sequential write performance with
                 * tagged queueing turned off and write caching turned on.
                 *
                 * PR:  kern/10398
                 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
                 * Drive:  DCAS-34330 w/ "S65A" firmware.
                 *
                 * The drive with the problem had the "S65A" firmware
                 * revision, and has also been reported (by Stephen J.
                 * Roznowski <sjr@home.net>) for a drive with the "S61A"
                 * firmware revision.
                 *
                 * Although no one has reported problems with the 2 gig
                 * version of the DCAS drive, the assumption is that it
                 * has the same problems as the 4 gig version.  Therefore
                 * this quirk entry disables tagged queueing for all
                 * DCAS drives.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Broken tagged queuing drive.
                 * Submitted by:
                 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
                 * in PR kern/9535
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Doesn't handle queue full condition correctly,
                 * so we need to limit maxtags to what the device
                 * can handle instead of determining this automatically.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
                /*quirks*/0, /*mintags*/2, /*maxtags*/32
        },
        {
                /* Really only one LUN */
                { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* I can't believe we need a quirk for DPT volumes. */
                { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
                /*mintags*/0, /*maxtags*/255
        },
        {
                /*
                 * Many Sony CDROM drives don't like multi-LUN probing.
                 */
                { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * This drive doesn't like multiple LUN probing.
                 * Submitted by:  Parag Patel <parag@cgt.com>
                 */
                { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * The 8200 doesn't like multi-lun probing, and probably
                 * doesn't like serial number requests either.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "EXB-8200*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Let's try the same as above, but for a drive that says
                 * it's an IPL-6860 but is actually an EXB 8200.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "IPL-6860*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * These Hitachi drives don't like multi-lun probing.
                 * The PR submitter has a DK319H, but says that the Linux
                 * kernel has a similar work-around for the DK312 and DK314,
                 * so all DK31* drives are quirked here.
                 * PR:            misc/18793
                 * Submitted by:  Paul Haddad <paul@pth.com>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                /*
                 * The Hitachi CJ series with J8A8 firmware apparently has
                 * problems with tagged commands.
                 * PR: 23536
                 * Reported by: amagai@nue.org
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * These are the large storage arrays.
                 * Submitted by:  William Carrel <william.carrel@infospace.com>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
                CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
        },
        {
                /*
                 * This old revision of the TDC3600 is also SCSI-1, and
                 * hangs upon serial number probing.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
                        " TDC 3600", "U07:"
                },
                CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Maxtor Personal Storage 3000XT (Firewire)
                 * hangs upon serial number probing.
                 */
                {
                        T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
                        "1394 storage", "*"
                },
                CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
                        "CP150", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
                        "96X2*", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* TeraSolutions special settings for TRC-22 RAID */
                { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
                  /*quirks*/0, /*mintags*/55, /*maxtags*/255
        },
        {
                /* Veritas Storage Appliance */
                { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
                  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
        },
        {
                /*
                 * Would respond to all LUNs.  Device type and removable
                 * flag are jumper-selectable.
                 */
                { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
                  "Tahiti 1", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Default tagged queuing parameters for all devices */
                {
                  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                /*quirks*/0, /*mintags*/2, /*maxtags*/255
        },
};
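
/*
 * Entries are matched against a device's INQUIRY data in table order by
 * xpt_find_quirk(); the vendor/product/revision patterns use shell-style
 * wildcards, so the catch-all entry at the end supplies the default tag
 * counts for any device not matched earlier.
 */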

static const int xpt_quirk_table_size =
        sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);

typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth      depth;
        void                    *tr_func;
        void                    *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
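
/*
 * Traversal callbacks return nonzero to continue the walk and zero to
 * stop it.  A minimal sketch of a device callback (the counter argument
 * is hypothetical) for use with xpt_for_all_devices() below:
 *
 *      static int
 *      count_device(struct cam_ed *device, void *arg)
 *      {
 *              (*(int *)arg)++;
 *              return (1);     (nonzero: keep traversing)
 *      }
 *
 *      ... xpt_for_all_devices(count_device, &count); ...
 */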

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;      /*
                                 * Maximum size of ccb pool.  Modified as
                                 * devices are added/removed or have their
                                 * opening counts changed.
                                 */
static u_int xpt_ccb_count;     /* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
        probe_periph_init, "probe",
        TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_NEEDGIANT,
        .d_open =       xptopen,
        .d_close =      xptclose,
        .d_ioctl =      xptioctl,
        .d_name =       "xpt",
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef  CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

/* Pointers to software interrupt handlers */
static void *camnet_ih;
static void *cambio_ih;

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
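
/*
 * A kernel configuration satisfying the rules above might therefore
 * read (the bus/target/lun values are illustrative):
 *
 *      options CAMDEBUG
 *      options CAM_DEBUG_BUS=0
 *      options CAM_DEBUG_TARGET=1
 *      options CAM_DEBUG_LUN=0
 *      options CAM_DEBUG_FLAGS=CAM_DEBUG_TRACE
 */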

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
        "cam",
        cam_module_event_handler,
        NULL
};

static void     xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static cam_status       xpt_compile_path(struct cam_path *new_path,
                                         struct cam_periph *perph,
                                         path_id_t path_id,
                                         target_id_t target_id,
                                         lun_id_t lun_id);

static void             xpt_release_path(struct cam_path *path);

static void             xpt_async_bcast(struct async_list *async_head,
                                        u_int32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static void             xpt_dev_async(u_int32_t async_code,
                                      struct cam_eb *bus,
                                      struct cam_et *target,
                                      struct cam_ed *device,
                                      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                                  u_int32_t new_priority);
static void      xpt_run_dev_allocq(struct cam_eb *bus);
static void      xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void      xpt_release_bus(struct cam_eb *bus);
static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
                                         int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
                 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
                                  lun_id_t lun_id);
static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
                                    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void      xpt_scan_lun(struct cam_periph *periph,
                              struct cam_path *path, cam_flags flags,
                              union ccb *ccb);
static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t    xptconfigbuscountfunc;
static xpt_busfunc_t    xptconfigfunc;
static void      xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static void      camisr(void *);
#if 0
static void      xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void      xptasync(struct cam_periph *periph,
                          u_int32_t code, cam_path *path);
#endif
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int              xpt_for_all_targets(xpt_targetfunc_t *tr_func,
                                            void *arg);
#endif
static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
                                            void *arg);
#ifdef notusedyet
static int              xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
                                            void *arg);
#endif
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);
static cam_status       proberegister(struct cam_periph *periph,
                                      void *arg);
static void      probeschedule(struct cam_periph *probe_periph);
static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
static void      proberequestdefaultnegotiation(struct cam_periph *periph);
static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
static void      probecleanup(struct cam_periph *periph);
static void      xpt_find_quirk(struct cam_ed *device);
#ifdef CAM_NEW_TRAN_CODE
static void      xpt_devise_transport(struct cam_path *path);
#endif /* CAM_NEW_TRAN_CODE */
static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
                                           struct cam_ed *device,
                                           int async_update);
static void      xpt_toggle_tags(struct cam_path *path);
static void      xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
                                            struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
                                           struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if (dev->ccbq.devq_openings > 0) {
                if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
                        cam_ccbq_resize(&dev->ccbq,
                                        dev->ccbq.dev_openings
                                        + dev->ccbq.dev_active);
                        dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
                }
                /*
                 * The priority of a device waiting for CCB resources
                 * is that of the highest priority peripheral driver
                 * enqueued.
                 */
                retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
                                          &dev->alloc_ccb_entry.pinfo,
                                          CAMQ_GET_HEAD(&dev->drvq)->priority);
        } else {
                retval = 0;
        }

        return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
        int     retval;

        if (dev->ccbq.dev_openings > 0) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&bus->sim->devq->send_queue,
                                     &dev->send_ccb_entry.pinfo,
                                     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
        return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
        return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
        return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
        /*
         * Have work to do.
         * Have space to do more work.
         * Allowed to do work.
         */
        return ((devq->alloc_queue.qfrozen_cnt == 0)
             && (devq->alloc_queue.entries > 0)
             && (devq->alloc_openings > 0));
}

static void
xpt_periph_init(void)
{
        make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init(void)
{
}

static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
        /* Caller will release the CCB */
        wakeup(&done_ccb->ccb_h.cbfcnp);
}
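
/*
 * xptdone() is installed as the completion callback (cbfcnp) for the
 * CCBs that xptioctl() pushes through cam_periph_runccb() below; the
 * wakeup on &ccb_h.cbfcnp unblocks the issuing thread, which sleeps on
 * that same address in cam_periph_ccbwait().
 */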

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * Only allow read-write access.
         */
        if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((flags & O_NONBLOCK) != 0) {
                printf("xpt%d: can't do nonblocking access\n", unit);
                return(ENODEV);
        }

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than unit 0, point out their mistake.
         */
        if (unit != 0) {
                printf("xptopen: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves open */
        xsoftc.flags |= XPT_FLAG_OPEN;

        return(0);
}
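
/*
 * Userland must therefore open the control device read-write and
 * blocking; a minimal sketch:
 *
 *      int fd = open("/dev/xpt0", O_RDWR);
 *
 * An O_RDONLY open fails with EPERM, and an O_NONBLOCK open fails with
 * ENODEV, per the checks above.
 */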

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than unit 0, point out their mistake.
         */
        if (unit != 0) {
                printf("xptclose: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves closed */
        xsoftc.flags &= ~XPT_FLAG_OPEN;

        return(0);
}

static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int unit, error;

        error = 0;
        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than unit 0, point out their mistake.
         */
        if (unit != 0) {
                printf("xptioctl: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        switch(cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver.  XPT_PATH_INQ is an exception to this, as
         * stated in the CAM spec.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;

                inccb = (union ccb *)addr;

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
                         || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
                                error = EINVAL;
                                break;
                        }
                        /* FALLTHROUGH */
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        ccb->ccb_h.cbfcnp = xptdone;
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        ccb.ccb_h.cbfcnp = xptdone;
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;
                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                break;
        }
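
        /*
         * From userland, the cases above are reached with a plain ioctl
         * on the xpt device.  A sketch for XPT_PATH_INQ (fields beyond
         * the header left at their zeroed defaults):
         *
         *      union ccb ccb;
         *
         *      bzero(&ccb, sizeof(ccb));
         *      ccb.ccb_h.func_code = XPT_PATH_INQ;
         *      ccb.ccb_h.path_id = 0;
         *      ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
         *      ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
         *      if (ioctl(fd, CAMIOCOMMAND, &ccb) == 0)
         *              printf("%s\n", ccb.cpi.sim_vid);
         */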
        /*
         * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
         * input, with the peripheral driver name and unit name filled in.
         * The other fields don't really matter as input.  The passthrough
         * driver name ("pass") and unit number are passed back in the ccb.
         * The current device generation number, the index into the device
         * peripheral driver list, and the status are also passed back.
         * Note that since we do everything in one pass, unlike the
         * XPT_GDEVLIST ccb, we never return a status of
         * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be)
         * impossible for the device peripheral driver list to change,
         * since we look at the whole thing in one pass and do it with
         * splcam protection.
         */
        case CAMGETPASSTHRU: {
                union ccb *ccb;
                struct cam_periph *periph;
                struct periph_driver **p_drv;
                char   *name;
                u_int unit;
                u_int cur_generation;
                int base_periph_found;
                int splbreaknum;
                int s;

                ccb = (union ccb *)addr;
                unit = ccb->cgdl.unit_number;
                name = ccb->cgdl.periph_name;
                /*
                 * Every 100 devices, we want to drop our spl protection to
                 * give the software interrupt handler a chance to run.
                 * Most systems won't run into this check, but this should
                 * avoid starvation in the software interrupt handler in
                 * large systems.
                 */
                splbreaknum = 100;

                base_periph_found = 0;

                /*
                 * Sanity check -- make sure we don't get a null peripheral
                 * driver name.
                 */
                if (*ccb->cgdl.periph_name == '\0') {
                        error = EINVAL;
                        break;
                }

                /* Keep the list from changing while we traverse it */
                s = splcam();
ptstartover:
                cur_generation = xsoftc.generation;

                /* first find our driver in the list of drivers */
                for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
                        if (strcmp((*p_drv)->driver_name, name) == 0)
                                break;

                if (*p_drv == NULL) {
                        splx(s);
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        break;
                }

                /*
                 * Run through every peripheral instance of this driver
                 * and check to see whether it matches the unit passed
                 * in by the user.  If it does, get out of the loops and
                 * find the passthrough driver associated with that
                 * peripheral driver.
                 */
                for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
                     periph = TAILQ_NEXT(periph, unit_links)) {

                        if (periph->unit_number == unit) {
                                break;
                        } else if (--splbreaknum == 0) {
                                splx(s);
                                s = splcam();
                                splbreaknum = 100;
                                if (cur_generation != xsoftc.generation)
                                       goto ptstartover;
                        }
                }
 1250                 /*
 1251                  * If we found the peripheral instance the user passed
 1252                  * in, go through all of the peripheral instances attached
 1253                  * to that device and look for a passthrough driver.
 1254                  */
 1255                 if (periph != NULL) {
 1256                         struct cam_ed *device;
 1257                         int i;
 1258 
 1259                         base_periph_found = 1;
 1260                         device = periph->path->device;
 1261                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
 1262                              periph != NULL;
 1263                              periph = SLIST_NEXT(periph, periph_links), i++) {
 1264                                 /*
 1265                                  * Check to see whether we have a
 1266                                  * passthrough device or not. 
 1267                                  */
 1268                                 if (strcmp(periph->periph_name, "pass") == 0) {
 1269                                         /*
 1270                                          * Fill in the getdevlist fields.
 1271                                          */
 1272                                         strcpy(ccb->cgdl.periph_name,
 1273                                                periph->periph_name);
 1274                                         ccb->cgdl.unit_number =
 1275                                                 periph->unit_number;
 1276                                         if (SLIST_NEXT(periph, periph_links))
 1277                                                 ccb->cgdl.status =
 1278                                                         CAM_GDEVLIST_MORE_DEVS;
 1279                                         else
 1280                                                 ccb->cgdl.status =
 1281                                                        CAM_GDEVLIST_LAST_DEVICE;
 1282                                         ccb->cgdl.generation =
 1283                                                 device->generation;
 1284                                         ccb->cgdl.index = i;
 1285                                         /*
 1286                                          * Fill in some CCB header fields
 1287                                          * that the user may want.
 1288                                          */
 1289                                         ccb->ccb_h.path_id =
 1290                                                 periph->path->bus->path_id;
 1291                                         ccb->ccb_h.target_id =
 1292                                                 periph->path->target->target_id;
 1293                                         ccb->ccb_h.target_lun =
 1294                                                 periph->path->device->lun_id;
 1295                                         ccb->ccb_h.status = CAM_REQ_CMP;
 1296                                         break;
 1297                                 }
 1298                         }
 1299                 }
 1300 
 1301                 /*
 1302                  * If the periph is null here, one of two things has
 1303                  * happened.  The first possibility is that we couldn't
 1304                  * find the unit number of the particular peripheral driver
 1305                  * that the user is asking about.  e.g. the user asks for
 1306                  * the passthrough driver for "da11".  We find the list of
 1307                  * "da" peripherals all right, but there is no unit 11.
 1308                  * The other possibility is that we went through the list
 1309                  * of peripheral drivers attached to the device structure,
 1310                  * but didn't find one with the name "pass".  Either way,
 1311                  * we return ENOENT, since we couldn't find something.
 1312                  */
 1313                 if (periph == NULL) {
 1314                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1315                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1316                         *ccb->cgdl.periph_name = '\0';
 1317                         ccb->cgdl.unit_number = 0;
 1318                         error = ENOENT;
 1319                         /*
 1320                          * It is unfortunate that this is even necessary,
 1321                          * but there are many, many clueless users out there.
 1322                          * If we get this far, the user is looking for the
 1323                          * passthrough driver, but doesn't have one in his
 1324                          * kernel.
 1325                          */
 1326                         if (base_periph_found == 1) {
 1327                                 printf("xptioctl: pass driver is not in the "
 1328                                        "kernel\n");
 1329                                 printf("xptioctl: put \"device pass0\" in "
 1330                                        "your kernel config file\n");
 1331                         }
 1332                 }
 1333                 splx(s);
 1334                 break;
 1335                 }
 1336         default:
 1337                 error = ENOTTY;
 1338                 break;
 1339         }
 1340 
 1341         return(error);
 1342 }
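
/*
 * Userland sketch of driving the CAMGETPASSTHRU case above.  This is
 * illustrative only (libcam's cam_open_device() performs a more complete
 * version of this dance); the "/dev/xpt0" node name and the
 * find_pass_unit() helper are assumptions, not part of this file.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>

static int
find_pass_unit(const char *periph_name, u_int unit)
{
        union ccb ccb;
        int fd, ret;

        if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
                return (-1);

        bzero(&ccb, sizeof(ccb));
        ccb.ccb_h.func_code = XPT_GDEVLIST;
        strncpy(ccb.cgdl.periph_name, periph_name,
            sizeof(ccb.cgdl.periph_name) - 1);
        ccb.cgdl.unit_number = unit;

        ret = ioctl(fd, CAMGETPASSTHRU, &ccb);
        close(fd);
        if (ret == -1 || (ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
                return (-1);

        /* e.g. prints "pass3" when asked about "da", unit 3 */
        printf("%s%d\n", ccb.cgdl.periph_name, ccb.cgdl.unit_number);
        return (0);
}
#endif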
 1343 
 1344 static int
 1345 cam_module_event_handler(module_t mod, int what, void *arg)
 1346 {
 1347         if (what == MOD_LOAD) {
 1348                 xpt_init(NULL);
 1349         } else if (what == MOD_UNLOAD) {
 1350                 return EBUSY;
 1351         } else {
 1352                 return EOPNOTSUPP;
 1353         }
 1354 
 1355         return 0;
 1356 }
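
/*
 * Sketch of how an event handler like this is hooked up.  Hedged: the
 * actual moduledata_t declaration lives elsewhere in this file, and the
 * SYSINIT subsystem/order values below are illustrative.
 */
#if 0
static moduledata_t cam_moduledata = {
        "cam",                          /* module name */
        cam_module_event_handler,       /* event handler */
        NULL                            /* extra data */
};
DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
#endif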
 1357 
 1358 /* Functions accessed by the peripheral drivers */
 1359 static void
 1360 xpt_init(void *dummy)
 1362 {
 1363         struct cam_sim *xpt_sim;
 1364         struct cam_path *path;
 1365         struct cam_devq *devq;
 1366         cam_status status;
 1367 
 1368         TAILQ_INIT(&xpt_busses);
 1369         TAILQ_INIT(&cam_bioq);
 1370         TAILQ_INIT(&cam_netq);
 1371         SLIST_INIT(&ccb_freeq);
 1372         STAILQ_INIT(&highpowerq);
 1373 
 1374         /*
 1375          * The xpt layer is, itself, the equivalent of a SIM.
 1376          * Allow 16 ccbs in the ccb pool for it.  This should
 1377          * give decent parallelism when we probe busses and
 1378          * perform other XPT functions.
 1379          */
 1380         devq = cam_simq_alloc(16);
 1381         xpt_sim = cam_sim_alloc(xptaction,
 1382                                 xptpoll,
 1383                                 "xpt",
 1384                                 /*softc*/NULL,
 1385                                 /*unit*/0,
 1386                                 /*max_dev_transactions*/0,
 1387                                 /*max_tagged_dev_transactions*/0,
 1388                                 devq);
 1389         xpt_max_ccbs = 16;
 1390                                 
 1391         xpt_bus_register(xpt_sim, /*bus #*/0);
 1392 
 1393         /*
 1394          * Looking at the XPT from the SIM layer, the XPT is
 1395          * the equivalent of a peripheral driver.  Allocate
 1396          * a peripheral driver entry for us.
 1397          */
 1398         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
 1399                                       CAM_TARGET_WILDCARD,
 1400                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
 1401                 printf("xpt_init: xpt_create_path failed with status %#x,"
 1402                        " failing attach\n", status);
 1403                 return;
 1404         }
 1405 
 1406         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
 1407                          path, NULL, 0, NULL);
 1408         xpt_free_path(path);
 1409 
 1410         xpt_sim->softc = xpt_periph;
 1411 
 1412         /*
 1413          * Register a callback for when interrupts are enabled.
 1414          */
 1415         xpt_config_hook =
 1416             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
 1417                                               M_TEMP, M_NOWAIT | M_ZERO);
 1418         if (xpt_config_hook == NULL) {
 1419                 printf("xpt_init: Cannot malloc config hook "
 1420                        "- failing attach\n");
 1421                 return;
 1422         }
 1423 
 1424         xpt_config_hook->ich_func = xpt_config;
 1425         if (config_intrhook_establish(xpt_config_hook) != 0) {
 1426                 free(xpt_config_hook, M_TEMP);
 1427                 printf("xpt_init: config_intrhook_establish failed "
 1428                        "- failing attach\n");
 1429         }
 1430 
 1431         /* Install our software interrupt handlers */
 1432         swi_add(NULL, "camnet", camisr, &cam_netq, SWI_CAMNET, 0, &camnet_ih);
 1433         swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih);
 1434 }
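
/*
 * A note on the intr_config_hook protocol used above: the hooked
 * function (xpt_config here) runs once interrupts are enabled, and it
 * must disestablish the hook when initial bus probing is done, or the
 * boot process will wait on it indefinitely.  The teardown, performed
 * later once configuration completes, looks roughly like:
 *
 *      config_intrhook_disestablish(xpt_config_hook);
 *      free(xpt_config_hook, M_TEMP);
 */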
 1435 
 1436 static cam_status
 1437 xptregister(struct cam_periph *periph, void *arg)
 1438 {
 1439         if (periph == NULL) {
 1440                 printf("xptregister: periph was NULL!!\n");
 1441                 return(CAM_REQ_CMP_ERR);
 1442         }
 1443 
 1444         periph->softc = NULL;
 1445 
 1446         xpt_periph = periph;
 1447 
 1448         return(CAM_REQ_CMP);
 1449 }
 1450 
 1451 int32_t
 1452 xpt_add_periph(struct cam_periph *periph)
 1453 {
 1454         struct cam_ed *device;
 1455         int32_t  status;
 1456         struct periph_list *periph_head;
 1457 
 1458         GIANT_REQUIRED;
 1459 
 1460         device = periph->path->device;
 1461 
 1462         status = CAM_REQ_CMP;
 1463 
 1464         if (device != NULL) {
 1465                 int s;
 1466 
 1467                 periph_head = &device->periphs;
 1468 
 1469                 /*
 1470                  * Make room for this peripheral
 1471                  * so it will fit in the queue
 1472                  * when it's scheduled to run
 1473                  */
 1474                 s = splsoftcam();
 1475                 status = camq_resize(&device->drvq,
 1476                                      device->drvq.array_size + 1);
 1477 
 1478                 device->generation++;
 1479 
 1480                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
 1481 
 1482                 splx(s);
 1483         }
 1484 
 1485         xsoftc.generation++;
 1486 
 1487         return (status);
 1488 }
 1489 
 1490 void
 1491 xpt_remove_periph(struct cam_periph *periph)
 1492 {
 1493         struct cam_ed *device;
 1494 
 1495         GIANT_REQUIRED;
 1496 
 1497         device = periph->path->device;
 1498 
 1499         if (device != NULL) {
 1500                 int s;
 1501                 struct periph_list *periph_head;
 1502 
 1503                 periph_head = &device->periphs;
 1504                 
 1505                 /* Release the slot for this peripheral */
 1506                 s = splsoftcam();
 1507                 camq_resize(&device->drvq, device->drvq.array_size - 1);
 1508 
 1509                 device->generation++;
 1510 
 1511                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
 1512 
 1513                 splx(s);
 1514         }
 1515 
 1516         xsoftc.generation++;
 1517 
 1518 }
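
/*
 * Illustrative sketch only: xpt_add_periph() and xpt_remove_periph()
 * bracket a peripheral instance's life.  In practice the generic
 * peripheral code makes these calls on the driver's behalf; the
 * hypothetical example_* helpers below exist just to show the pairing.
 */
#if 0
static int32_t
example_attach_periph(struct cam_periph *periph)
{
        /* Grow the device's driver request queue and link the periph in. */
        return (xpt_add_periph(periph));
}

static void
example_detach_periph(struct cam_periph *periph)
{
        /* Release the queue slot and unlink the periph. */
        xpt_remove_periph(periph);
}
#endif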
 1519 
 1520 #ifdef CAM_NEW_TRAN_CODE
 1521 
 1522 void
 1523 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1524 {
 1525         struct  ccb_pathinq cpi;
 1526         struct  ccb_trans_settings cts;
 1527         struct  cam_path *path;
 1528         u_int   speed;
 1529         u_int   freq;
 1530         u_int   mb;
 1531         int     s;
 1532 
 1533         GIANT_REQUIRED;
 1534 
 1535         path = periph->path;
 1536         /*
 1537          * To ensure that this is printed in one piece,
 1538          * mask out CAM interrupts.
 1539          */
 1540         s = splsoftcam();
 1541         printf("%s%d at %s%d bus %d target %d lun %d\n",
 1542                periph->periph_name, periph->unit_number,
 1543                path->bus->sim->sim_name,
 1544                path->bus->sim->unit_number,
 1545                path->bus->sim->bus_id,
 1546                path->target->target_id,
 1547                path->device->lun_id);
 1548         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1549         scsi_print_inquiry(&path->device->inq_data);
 1550         if (bootverbose && path->device->serial_num_len > 0) {
 1551                 /* Don't wrap the screen  - print only the first 60 chars */
 1552                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1553                        periph->unit_number, path->device->serial_num);
 1554         }
 1555         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 1556         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 1557         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 1558         xpt_action((union ccb*)&cts);
 1559 
 1560         /* Ask the SIM for its base transfer speed */
 1561         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 1562         cpi.ccb_h.func_code = XPT_PATH_INQ;
 1563         xpt_action((union ccb *)&cpi);
 1564 
 1565         speed = cpi.base_transfer_speed;
 1566         freq = 0;
 1567         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1568                 struct  ccb_trans_settings_spi *spi;
 1569 
 1570                 spi = &cts.xport_specific.spi;
 1571                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
 1572                   && spi->sync_offset != 0) {
 1573                         freq = scsi_calc_syncsrate(spi->sync_period);
 1574                         speed = freq;
 1575                 }
 1576 
 1577                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
 1578                         speed *= (0x01 << spi->bus_width);
 1579         }
 1580 
 1581         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1582                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
 1583                 if (fc->valid & CTS_FC_VALID_SPEED) {
 1584                         speed = fc->bitrate;
 1585                 }
 1586         }
 1587 
 1588         mb = speed / 1000;
 1589         if (mb > 0)
 1590                 printf("%s%d: %d.%03dMB/s transfers",
 1591                        periph->periph_name, periph->unit_number,
 1592                        mb, speed % 1000);
 1593         else
 1594                 printf("%s%d: %dKB/s transfers", periph->periph_name,
 1595                        periph->unit_number, speed);
 1596         /* Report additional information about SPI connections */
 1597         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1598                 struct  ccb_trans_settings_spi *spi;
 1599 
 1600                 spi = &cts.xport_specific.spi;
 1601                 if (freq != 0) {
 1602                         printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
 1603                                freq % 1000,
 1604                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
 1605                              ? " DT" : "",
 1606                                spi->sync_offset);
 1607                 }
 1608                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
 1609                  && spi->bus_width > 0) {
 1610                         if (freq != 0) {
 1611                                 printf(", ");
 1612                         } else {
 1613                                 printf(" (");
 1614                         }
 1615                         printf("%dbit)", 8 * (0x01 << spi->bus_width));
 1616                 } else if (freq != 0) {
 1617                         printf(")");
 1618                 }
 1619         }
 1620         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1621                 struct  ccb_trans_settings_fc *fc;
 1622 
 1623                 fc = &cts.xport_specific.fc;
 1624                 if (fc->valid & CTS_FC_VALID_WWNN)
 1625                         printf(" WWNN 0x%llx", (long long) fc->wwnn);
 1626                 if (fc->valid & CTS_FC_VALID_WWPN)
 1627                         printf(" WWPN 0x%llx", (long long) fc->wwpn);
 1628                 if (fc->valid & CTS_FC_VALID_PORT)
 1629                         printf(" PortID 0x%x", fc->port);
 1630         }
 1631 
 1632         if (path->device->inq_flags & SID_CmdQue
 1633          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1634                 printf("\n%s%d: Tagged Queueing Enabled",
 1635                        periph->periph_name, periph->unit_number);
 1636         }
 1637         printf("\n");
 1638 
 1639         /*
 1640          * We only want to print the caller's announce string if they've
 1641          * passed one in..
 1642          * passed one in.
 1643         if (announce_string != NULL)
 1644                 printf("%s%d: %s\n", periph->periph_name,
 1645                        periph->unit_number, announce_string);
 1646         splx(s);
 1647 }
 1648 #else /* CAM_NEW_TRAN_CODE */
 1649 void
 1650 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1651 {
 1652         int s;
 1653         u_int mb;
 1654         struct cam_path *path;
 1655         struct ccb_trans_settings cts;
 1656 
 1657         GIANT_REQUIRED;
 1658 
 1659         path = periph->path;
 1660         /*
 1661          * To ensure that this is printed in one piece,
 1662          * mask out CAM interrupts.
 1663          */
 1664         s = splsoftcam();
 1665         printf("%s%d at %s%d bus %d target %d lun %d\n",
 1666                periph->periph_name, periph->unit_number,
 1667                path->bus->sim->sim_name,
 1668                path->bus->sim->unit_number,
 1669                path->bus->sim->bus_id,
 1670                path->target->target_id,
 1671                path->device->lun_id);
 1672         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1673         scsi_print_inquiry(&path->device->inq_data);
 1674         if ((bootverbose)
 1675          && (path->device->serial_num_len > 0)) {
 1676                 /* Don't wrap the screen  - print only the first 60 chars */
 1677                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1678                        periph->unit_number, path->device->serial_num);
 1679         }
 1680         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 1681         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 1682         cts.flags = CCB_TRANS_CURRENT_SETTINGS;
 1683         xpt_action((union ccb*)&cts);
 1684         if (cts.ccb_h.status == CAM_REQ_CMP) {
 1685                 u_int speed;
 1686                 u_int freq;
 1687 
 1688                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1689                   && cts.sync_offset != 0) {
 1690                         freq = scsi_calc_syncsrate(cts.sync_period);
 1691                         speed = freq;
 1692                 } else {
 1693                         struct ccb_pathinq cpi;
 1694 
 1695                         /* Ask the SIM for its base transfer speed */
 1696                         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 1697                         cpi.ccb_h.func_code = XPT_PATH_INQ;
 1698                         xpt_action((union ccb *)&cpi);
 1699 
 1700                         speed = cpi.base_transfer_speed;
 1701                         freq = 0;
 1702                 }
 1703                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
 1704                         speed *= (0x01 << cts.bus_width);
 1705                 mb = speed / 1000;
 1706                 if (mb > 0)
 1707                         printf("%s%d: %d.%03dMB/s transfers",
 1708                                periph->periph_name, periph->unit_number,
 1709                                mb, speed % 1000);
 1710                 else
 1711                         printf("%s%d: %dKB/s transfers", periph->periph_name,
 1712                                periph->unit_number, speed);
 1713                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1714                  && cts.sync_offset != 0) {
 1715                         printf(" (%d.%03dMHz, offset %d", freq / 1000,
 1716                                freq % 1000, cts.sync_offset);
 1717                 }
 1718                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
 1719                  && cts.bus_width > 0) {
 1720                         if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1721                          && cts.sync_offset != 0) {
 1722                                 printf(", ");
 1723                         } else {
 1724                                 printf(" (");
 1725                         }
 1726                         printf("%dbit)", 8 * (0x01 << cts.bus_width));
 1727                 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1728                         && cts.sync_offset != 0) {
 1729                         printf(")");
 1730                 }
 1731 
 1732                 if (path->device->inq_flags & SID_CmdQue
 1733                  || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1734                         printf(", Tagged Queueing Enabled");
 1735                 }
 1736 
 1737                 printf("\n");
 1738         } else if (path->device->inq_flags & SID_CmdQue
 1739                 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1740                 printf("%s%d: Tagged Queueing Enabled\n",
 1741                        periph->periph_name, periph->unit_number);
 1742         }
 1743 
 1744         /*
 1745          * We only want to print the caller's announce string if they've
 1746          * passed one in.
 1747          */
 1748         if (announce_string != NULL)
 1749                 printf("%s%d: %s\n", periph->periph_name,
 1750                        periph->unit_number, announce_string);
 1751         splx(s);
 1752 }
 1753 
 1754 #endif /* CAM_NEW_TRAN_CODE */
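
/*
 * Worked example for the announce arithmetic above (both variants share
 * it): a device negotiating Fast SCSI sync period factor 25 gives
 * scsi_calc_syncsrate() == 10000 (i.e. 10.000MHz, expressed in kHz).
 * On a wide bus (bus_width == 1) the base speed doubles:
 * 10000 * (1 << 1) = 20000 KB/s, so mb == 20 and the line printed is
 * "20.000MB/s transfers (10.000MHz, offset N, 16bit)".
 */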
 1755 
 1756 static dev_match_ret
 1757 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1758             struct cam_eb *bus)
 1759 {
 1760         dev_match_ret retval;
 1761         int i;
 1762 
 1763         retval = DM_RET_NONE;
 1764 
 1765         /*
 1766          * If we aren't given something to match against, that's an error.
 1767          */
 1768         if (bus == NULL)
 1769                 return(DM_RET_ERROR);
 1770 
 1771         /*
 1772          * If there are no match entries, then this bus matches no
 1773          * matter what.
 1774          */
 1775         if ((patterns == NULL) || (num_patterns == 0))
 1776                 return(DM_RET_DESCEND | DM_RET_COPY);
 1777 
 1778         for (i = 0; i < num_patterns; i++) {
 1779                 struct bus_match_pattern *cur_pattern;
 1780 
 1781                 /*
 1782                  * If the pattern in question isn't for a bus node, we
 1783                  * aren't interested.  However, we do indicate to the
 1784                  * calling routine that we should continue descending the
 1785                  * tree, since the user wants to match against lower-level
 1786                  * EDT elements.
 1787                  */
 1788                 if (patterns[i].type != DEV_MATCH_BUS) {
 1789                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1790                                 retval |= DM_RET_DESCEND;
 1791                         continue;
 1792                 }
 1793 
 1794                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1795 
 1796                 /*
 1797                  * If they want to match any bus node, we give them this
 1798                  * bus node.
 1799                  */
 1800                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1801                         /* set the copy flag */
 1802                         retval |= DM_RET_COPY;
 1803 
 1804                         /*
 1805                          * If we've already decided on an action, go ahead
 1806                          * and return.
 1807                          */
 1808                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1809                                 return(retval);
 1810                 }
 1811 
 1812                 /*
 1813                  * Not sure why someone would do this...
 1814                  */
 1815                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1816                         continue;
 1817 
 1818                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1819                  && (cur_pattern->path_id != bus->path_id))
 1820                         continue;
 1821 
 1822                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1823                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1824                         continue;
 1825 
 1826                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1827                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1828                         continue;
 1829 
 1830                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1831                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1832                              DEV_IDLEN) != 0))
 1833                         continue;
 1834 
 1835                 /*
 1836                  * If we get to this point, the user definitely wants 
 1837                  * information on this bus.  So tell the caller to copy the
 1838                  * data out.
 1839                  */
 1840                 retval |= DM_RET_COPY;
 1841 
 1842                 /*
 1843                  * If the return action has been set to descend, then we
 1844                  * know that we've already seen a non-bus matching
 1845                  * expression, therefore we need to further descend the tree.
 1846                  * This won't change by continuing around the loop, so we
 1847                  * go ahead and return.  If we haven't seen a non-bus
 1848                  * matching expression, we keep going around the loop until
 1849                  * we exhaust the matching expressions.  We'll set the stop
 1850                  * flag once we fall out of the loop.
 1851                  */
 1852                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1853                         return(retval);
 1854         }
 1855 
 1856         /*
 1857          * If the return action hasn't been set to descend yet, that means
 1858          * we haven't seen anything other than bus matching patterns.  So
 1859          * tell the caller to stop descending the tree -- the user doesn't
 1860          * want to match against lower level tree elements.
 1861          */
 1862         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1863                 retval |= DM_RET_STOP;
 1864 
 1865         return(retval);
 1866 }
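
/*
 * Illustrative sketch: filling in a pattern that makes xptbusmatch()
 * copy out only the bus provided by SIM "ahc", unit 0.  Flag bits left
 * clear are simply not compared.  The example_* helper is hypothetical.
 */
#if 0
static void
example_fill_bus_pattern(struct dev_match_pattern *pat)
{
        struct bus_match_pattern *bp;

        bzero(pat, sizeof(*pat));
        pat->type = DEV_MATCH_BUS;
        bp = &pat->pattern.bus_pattern;
        bp->flags = BUS_MATCH_NAME | BUS_MATCH_UNIT;
        strncpy(bp->dev_name, "ahc", sizeof(bp->dev_name) - 1);
        bp->unit_number = 0;
}
#endif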
 1867 
 1868 static dev_match_ret
 1869 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1870                struct cam_ed *device)
 1871 {
 1872         dev_match_ret retval;
 1873         int i;
 1874 
 1875         retval = DM_RET_NONE;
 1876 
 1877         /*
 1878          * If we aren't given something to match against, that's an error.
 1879          */
 1880         if (device == NULL)
 1881                 return(DM_RET_ERROR);
 1882 
 1883         /*
 1884          * If there are no match entries, then this device matches no
 1885          * matter what.
 1886          */
 1887         if ((patterns == NULL) || (num_patterns == 0))
 1888                 return(DM_RET_DESCEND | DM_RET_COPY);
 1889 
 1890         for (i = 0; i < num_patterns; i++) {
 1891                 struct device_match_pattern *cur_pattern;
 1892 
 1893                 /*
 1894                  * If the pattern in question isn't for a device node, we
 1895                  * aren't interested.
 1896                  */
 1897                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1898                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1899                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1900                                 retval |= DM_RET_DESCEND;
 1901                         continue;
 1902                 }
 1903 
 1904                 cur_pattern = &patterns[i].pattern.device_pattern;
 1905 
 1906                 /*
 1907                  * If they want to match any device node, we give them any
 1908                  * device node.
 1909                  */
 1910                 if (cur_pattern->flags == DEV_MATCH_ANY) {
 1911                         /* set the copy flag */
 1912                         retval |= DM_RET_COPY;
 1913
 1915                         /*
 1916                          * If we've already decided on an action, go ahead
 1917                          * and return.
 1918                          */
 1919                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1920                                 return(retval);
 1921                 }
 1922 
 1923                 /*
 1924                  * Not sure why someone would do this...
 1925                  */
 1926                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1927                         continue;
 1928 
 1929                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1930                  && (cur_pattern->path_id != device->target->bus->path_id))
 1931                         continue;
 1932 
 1933                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1934                  && (cur_pattern->target_id != device->target->target_id))
 1935                         continue;
 1936 
 1937                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1938                  && (cur_pattern->target_lun != device->lun_id))
 1939                         continue;
 1940 
 1941                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1942                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1943                                     (caddr_t)&cur_pattern->inq_pat,
 1944                                     1, sizeof(cur_pattern->inq_pat),
 1945                                     scsi_static_inquiry_match) == NULL))
 1946                         continue;
 1947 
 1948                 /*
 1949                  * If we get to this point, the user definitely wants 
 1950                  * information on this device.  So tell the caller to copy
 1951                  * the data out.
 1952                  */
 1953                 retval |= DM_RET_COPY;
 1954 
 1955                 /*
 1956                  * If the return action has been set to descend, then we
 1957                  * know that we've already seen a peripheral matching
 1958                  * expression, therefore we need to further descend the tree.
 1959                  * This won't change by continuing around the loop, so we
 1960                  * go ahead and return.  If we haven't seen a peripheral
 1961                  * matching expression, we keep going around the loop until
 1962                  * we exhaust the matching expressions.  We'll set the stop
 1963                  * flag once we fall out of the loop.
 1964                  */
 1965                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1966                         return(retval);
 1967         }
 1968 
 1969         /*
 1970          * If the return action hasn't been set to descend yet, that means
 1971          * we haven't seen any peripheral matching patterns.  So tell the
 1972          * caller to stop descending the tree -- the user doesn't want to
 1973          * match against lower level tree elements.
 1974          */
 1975         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1976                 retval |= DM_RET_STOP;
 1977 
 1978         return(retval);
 1979 }
 1980 
 1981 /*
 1982  * Match a single peripheral against any number of match patterns.
 1983  */
 1984 static dev_match_ret
 1985 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1986                struct cam_periph *periph)
 1987 {
 1988         dev_match_ret retval;
 1989         int i;
 1990 
 1991         /*
 1992          * If we aren't given something to match against, that's an error.
 1993          */
 1994         if (periph == NULL)
 1995                 return(DM_RET_ERROR);
 1996 
 1997         /*
 1998          * If there are no match entries, then this peripheral matches no
 1999          * matter what.
 2000          */
 2001         if ((patterns == NULL) || (num_patterns == 0))
 2002                 return(DM_RET_STOP | DM_RET_COPY);
 2003 
 2004         /*
 2005          * There aren't any nodes below a peripheral node, so there's no
 2006          * reason to descend the tree any further.
 2007          */
 2008         retval = DM_RET_STOP;
 2009 
 2010         for (i = 0; i < num_patterns; i++) {
 2011                 struct periph_match_pattern *cur_pattern;
 2012 
 2013                 /*
 2014                  * If the pattern in question isn't for a peripheral, we
 2015                  * aren't interested.
 2016                  */
 2017                 if (patterns[i].type != DEV_MATCH_PERIPH)
 2018                         continue;
 2019 
 2020                 cur_pattern = &patterns[i].pattern.periph_pattern;
 2021 
 2022                 /*
 2023                  * If they want to match on anything, then we will do so.
 2024                  */
 2025                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 2026                         /* set the copy flag */
 2027                         retval |= DM_RET_COPY;
 2028 
 2029                         /*
 2030                          * We've already set the return action to stop,
 2031                          * since there are no nodes below peripherals in
 2032                          * the tree.
 2033                          */
 2034                         return(retval);
 2035                 }
 2036 
 2037                 /*
 2038                  * Not sure why someone would do this...
 2039                  */
 2040                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 2041                         continue;
 2042 
 2043                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 2044                  && (cur_pattern->path_id != periph->path->bus->path_id))
 2045                         continue;
 2046 
 2047                 /*
 2048                  * For the target and lun id's, we have to make sure the
 2049                  * target and lun pointers aren't NULL.  The xpt peripheral
 2050                  * has a wildcard target and device.
 2051                  */
 2052                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 2053                  && ((periph->path->target == NULL)
 2054                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 2055                         continue;
 2056 
 2057                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 2058                  && ((periph->path->device == NULL)
 2059                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 2060                         continue;
 2061 
 2062                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 2063                  && (cur_pattern->unit_number != periph->unit_number))
 2064                         continue;
 2065 
 2066                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 2067                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 2068                              DEV_IDLEN) != 0))
 2069                         continue;
 2070 
 2071                 /*
 2072                  * If we get to this point, the user definitely wants 
 2073                  * information on this peripheral.  So tell the caller to
 2074                  * copy the data out.
 2075                  */
 2076                 retval |= DM_RET_COPY;
 2077 
 2078                 /*
 2079                  * The return action has already been set to stop, since
 2080                  * peripherals don't have any nodes below them in the EDT.
 2081                  */
 2082                 return(retval);
 2083         }
 2084 
 2085         /*
 2086          * If we get to this point, the peripheral that was passed in
 2087          * doesn't match any of the patterns.
 2088          */
 2089         return(retval);
 2090 }
 2091 
 2092 static int
 2093 xptedtbusfunc(struct cam_eb *bus, void *arg)
 2094 {
 2095         struct ccb_dev_match *cdm;
 2096         dev_match_ret retval;
 2097 
 2098         cdm = (struct ccb_dev_match *)arg;
 2099 
 2100         /*
 2101          * If our position is for something deeper in the tree, that means
 2102          * that we've already seen this node.  So, we keep going down.
 2103          */
 2104         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2105          && (cdm->pos.cookie.bus == bus)
 2106          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2107          && (cdm->pos.cookie.target != NULL))
 2108                 retval = DM_RET_DESCEND;
 2109         else
 2110                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 2111 
 2112         /*
 2113          * If we got an error, bail out of the search.
 2114          */
 2115         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2116                 cdm->status = CAM_DEV_MATCH_ERROR;
 2117                 return(0);
 2118         }
 2119 
 2120         /*
 2121          * If the copy flag is set, copy this bus out.
 2122          */
 2123         if (retval & DM_RET_COPY) {
 2124                 int spaceleft, j;
 2125 
 2126                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2127                         sizeof(struct dev_match_result));
 2128 
 2129                 /*
 2130                  * If we don't have enough space to put in another
 2131                  * match result, save our position and tell the
 2132                  * user there are more devices to check.
 2133                  */
 2134                 if (spaceleft < sizeof(struct dev_match_result)) {
 2135                         bzero(&cdm->pos, sizeof(cdm->pos));
 2136                         cdm->pos.position_type = 
 2137                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 2138 
 2139                         cdm->pos.cookie.bus = bus;
 2140                         cdm->pos.generations[CAM_BUS_GENERATION] =
 2141                                 bus_generation;
 2142                         cdm->status = CAM_DEV_MATCH_MORE;
 2143                         return(0);
 2144                 }
 2145                 j = cdm->num_matches;
 2146                 cdm->num_matches++;
 2147                 cdm->matches[j].type = DEV_MATCH_BUS;
 2148                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 2149                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 2150                 cdm->matches[j].result.bus_result.unit_number =
 2151                         bus->sim->unit_number;
 2152                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 2153                         bus->sim->sim_name, DEV_IDLEN);
 2154         }
 2155 
 2156         /*
 2157          * If the user is only interested in busses, there's no
 2158          * reason to descend to the next level in the tree.
 2159          */
 2160         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2161                 return(1);
 2162 
 2163         /*
 2164          * If there is a target generation recorded, check it to
 2165          * make sure the target list hasn't changed.
 2166          */
 2167         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2168          && (bus == cdm->pos.cookie.bus)
 2169          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2170          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 2171          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 2172              bus->generation)) {
 2173                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2174                 return(0);
 2175         }
 2176 
 2177         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2178          && (cdm->pos.cookie.bus == bus)
 2179          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2180          && (cdm->pos.cookie.target != NULL))
 2181                 return(xpttargettraverse(bus,
 2182                                         (struct cam_et *)cdm->pos.cookie.target,
 2183                                          xptedttargetfunc, arg));
 2184         else
 2185                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 2186 }
 2187 
 2188 static int
 2189 xptedttargetfunc(struct cam_et *target, void *arg)
 2190 {
 2191         struct ccb_dev_match *cdm;
 2192 
 2193         cdm = (struct ccb_dev_match *)arg;
 2194 
 2195         /*
 2196          * If there is a device list generation recorded, check it to
 2197          * make sure the device list hasn't changed.
 2198          */
 2199         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2200          && (cdm->pos.cookie.bus == target->bus)
 2201          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2202          && (cdm->pos.cookie.target == target)
 2203          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2204          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 2205          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 2206              target->generation)) {
 2207                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2208                 return(0);
 2209         }
 2210 
 2211         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2212          && (cdm->pos.cookie.bus == target->bus)
 2213          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2214          && (cdm->pos.cookie.target == target)
 2215          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2216          && (cdm->pos.cookie.device != NULL))
 2217                 return(xptdevicetraverse(target,
 2218                                         (struct cam_ed *)cdm->pos.cookie.device,
 2219                                          xptedtdevicefunc, arg));
 2220         else
 2221                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 2222 }
 2223 
 2224 static int
 2225 xptedtdevicefunc(struct cam_ed *device, void *arg)
 2226 {
 2227 
 2228         struct ccb_dev_match *cdm;
 2229         dev_match_ret retval;
 2230 
 2231         cdm = (struct ccb_dev_match *)arg;
 2232 
 2233         /*
 2234          * If our position is for something deeper in the tree, that means
 2235          * that we've already seen this node.  So, we keep going down.
 2236          */
 2237         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2238          && (cdm->pos.cookie.device == device)
 2239          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2240          && (cdm->pos.cookie.periph != NULL))
 2241                 retval = DM_RET_DESCEND;
 2242         else
 2243                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 2244                                         device);
 2245 
 2246         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2247                 cdm->status = CAM_DEV_MATCH_ERROR;
 2248                 return(0);
 2249         }
 2250 
 2251         /*
 2252          * If the copy flag is set, copy this device out.
 2253          */
 2254         if (retval & DM_RET_COPY) {
 2255                 int spaceleft, j;
 2256 
 2257                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2258                         sizeof(struct dev_match_result));
 2259 
 2260                 /*
 2261                  * If we don't have enough space to put in another
 2262                  * match result, save our position and tell the
 2263                  * user there are more devices to check.
 2264                  */
 2265                 if (spaceleft < sizeof(struct dev_match_result)) {
 2266                         bzero(&cdm->pos, sizeof(cdm->pos));
 2267                         cdm->pos.position_type = 
 2268                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2269                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 2270 
 2271                         cdm->pos.cookie.bus = device->target->bus;
 2272                         cdm->pos.generations[CAM_BUS_GENERATION] =
 2273                                 bus_generation;
 2274                         cdm->pos.cookie.target = device->target;
 2275                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2276                                 device->target->bus->generation;
 2277                         cdm->pos.cookie.device = device;
 2278                         cdm->pos.generations[CAM_DEV_GENERATION] = 
 2279                                 device->target->generation;
 2280                         cdm->status = CAM_DEV_MATCH_MORE;
 2281                         return(0);
 2282                 }
 2283                 j = cdm->num_matches;
 2284                 cdm->num_matches++;
 2285                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 2286                 cdm->matches[j].result.device_result.path_id =
 2287                         device->target->bus->path_id;
 2288                 cdm->matches[j].result.device_result.target_id =
 2289                         device->target->target_id;
 2290                 cdm->matches[j].result.device_result.target_lun =
 2291                         device->lun_id;
 2292                 bcopy(&device->inq_data,
 2293                       &cdm->matches[j].result.device_result.inq_data,
 2294                       sizeof(struct scsi_inquiry_data));
 2295 
 2296                 /* Let the user know whether this device is unconfigured */
 2297                 if (device->flags & CAM_DEV_UNCONFIGURED)
 2298                         cdm->matches[j].result.device_result.flags =
 2299                                 DEV_RESULT_UNCONFIGURED;
 2300                 else
 2301                         cdm->matches[j].result.device_result.flags =
 2302                                 DEV_RESULT_NOFLAG;
 2303         }
 2304 
 2305         /*
 2306          * If the user isn't interested in peripherals, don't descend
 2307          * the tree any further.
 2308          */
 2309         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2310                 return(1);
 2311 
 2312         /*
 2313          * If there is a peripheral list generation recorded, make sure
 2314          * it hasn't changed.
 2315          */
 2316         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2317          && (device->target->bus == cdm->pos.cookie.bus)
 2318          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2319          && (device->target == cdm->pos.cookie.target)
 2320          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2321          && (device == cdm->pos.cookie.device)
 2322          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2323          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2324          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2325              device->generation)){
 2326                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2327                 return(0);
 2328         }
 2329 
 2330         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2331          && (cdm->pos.cookie.bus == device->target->bus)
 2332          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2333          && (cdm->pos.cookie.target == device->target)
 2334          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2335          && (cdm->pos.cookie.device == device)
 2336          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2337          && (cdm->pos.cookie.periph != NULL))
 2338                 return(xptperiphtraverse(device,
 2339                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2340                                 xptedtperiphfunc, arg));
 2341         else
 2342                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 2343 }
 2344 
 2345 static int
 2346 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 2347 {
 2348         struct ccb_dev_match *cdm;
 2349         dev_match_ret retval;
 2350 
 2351         cdm = (struct ccb_dev_match *)arg;
 2352 
 2353         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2354 
 2355         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2356                 cdm->status = CAM_DEV_MATCH_ERROR;
 2357                 return(0);
 2358         }
 2359 
 2360         /*
 2361          * If the copy flag is set, copy this peripheral out.
 2362          */
 2363         if (retval & DM_RET_COPY) {
 2364                 int spaceleft, j;
 2365 
 2366                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2367                         sizeof(struct dev_match_result));
 2368 
 2369                 /*
 2370                  * If we don't have enough space to put in another
 2371                  * match result, save our position and tell the
 2372                  * user there are more devices to check.
 2373                  */
 2374                 if (spaceleft < sizeof(struct dev_match_result)) {
 2375                         bzero(&cdm->pos, sizeof(cdm->pos));
 2376                         cdm->pos.position_type = 
 2377                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2378                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 2379                                 CAM_DEV_POS_PERIPH;
 2380 
 2381                         cdm->pos.cookie.bus = periph->path->bus;
 2382                         cdm->pos.generations[CAM_BUS_GENERATION] =
 2383                                 bus_generation;
 2384                         cdm->pos.cookie.target = periph->path->target;
 2385                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2386                                 periph->path->bus->generation;
 2387                         cdm->pos.cookie.device = periph->path->device;
 2388                         cdm->pos.generations[CAM_DEV_GENERATION] = 
 2389                                 periph->path->target->generation;
 2390                         cdm->pos.cookie.periph = periph;
 2391                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2392                                 periph->path->device->generation;
 2393                         cdm->status = CAM_DEV_MATCH_MORE;
 2394                         return(0);
 2395                 }
 2396 
 2397                 j = cdm->num_matches;
 2398                 cdm->num_matches++;
 2399                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2400                 cdm->matches[j].result.periph_result.path_id =
 2401                         periph->path->bus->path_id;
 2402                 cdm->matches[j].result.periph_result.target_id =
 2403                         periph->path->target->target_id;
 2404                 cdm->matches[j].result.periph_result.target_lun =
 2405                         periph->path->device->lun_id;
 2406                 cdm->matches[j].result.periph_result.unit_number =
 2407                         periph->unit_number;
 2408                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2409                         periph->periph_name, DEV_IDLEN);
 2410         }
 2411 
 2412         return(1);
 2413 }
 2414 
 2415 static int
 2416 xptedtmatch(struct ccb_dev_match *cdm)
 2417 {
 2418         int ret;
 2419 
 2420         cdm->num_matches = 0;
 2421 
 2422         /*
 2423          * Check the bus list generation.  If it has changed, the user
 2424          * needs to reset everything and start over.
 2425          */
 2426         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2427          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 2428          && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
 2429                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2430                 return(0);
 2431         }
 2432 
 2433         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2434          && (cdm->pos.cookie.bus != NULL))
 2435                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 2436                                      xptedtbusfunc, cdm);
 2437         else
 2438                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 2439 
 2440         /*
 2441          * If we get back 0, that means that we had to stop before fully
 2442          * traversing the EDT.  It also means that one of the subroutines
 2443          * has set the status field to the proper value.  If we get back 1,
 2444          * we've fully traversed the EDT and copied out any matching entries.
 2445          */
 2446         if (ret == 1)
 2447                 cdm->status = CAM_DEV_MATCH_LAST;
 2448 
 2449         return(ret);
 2450 }
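
/*
 * Userland sketch of the MORE/LAST protocol that xptedtmatch()
 * implements, roughly what camcontrol(8) does to list devices.  Hedged:
 * error handling and result decoding are trimmed, and the walk_edt()
 * helper and MATCH_BUF_SIZE value are assumptions.  With no patterns
 * supplied, every bus, device, and peripheral is copied out.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdlib.h>
#include <strings.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>

#define MATCH_BUF_SIZE  4096

static void
walk_edt(int xpt_fd)
{
        union ccb ccb;

        bzero(&ccb, sizeof(ccb));
        ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
        ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
        ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
        ccb.ccb_h.func_code = XPT_DEV_MATCH;
        ccb.cdm.match_buf_len = MATCH_BUF_SIZE;
        ccb.cdm.matches = malloc(MATCH_BUF_SIZE);
        ccb.cdm.num_patterns = 0;       /* match everything */
        ccb.cdm.pattern_buf_len = 0;

        do {
                if (ioctl(xpt_fd, CAMIOCOMMAND, &ccb) == -1)
                        break;
                /* ... consume ccb.cdm.num_matches results here ... */
        } while (ccb.ccb_h.status == CAM_REQ_CMP &&
            ccb.cdm.status == CAM_DEV_MATCH_MORE);

        free(ccb.cdm.matches);
}
#endif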
 2451 
 2452 static int
 2453 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 2454 {
 2455         struct ccb_dev_match *cdm;
 2456 
 2457         cdm = (struct ccb_dev_match *)arg;
 2458 
 2459         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2460          && (cdm->pos.cookie.pdrv == pdrv)
 2461          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2462          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2463          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2464              (*pdrv)->generation)) {
 2465                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2466                 return(0);
 2467         }
 2468 
 2469         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2470          && (cdm->pos.cookie.pdrv == pdrv)
 2471          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2472          && (cdm->pos.cookie.periph != NULL))
 2473                 return(xptpdperiphtraverse(pdrv,
 2474                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2475                                 xptplistperiphfunc, arg));
 2476         else
 2477                 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
 2478 }
 2479 
 2480 static int
 2481 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 2482 {
 2483         struct ccb_dev_match *cdm;
 2484         dev_match_ret retval;
 2485 
 2486         cdm = (struct ccb_dev_match *)arg;
 2487 
 2488         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2489 
 2490         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2491                 cdm->status = CAM_DEV_MATCH_ERROR;
 2492                 return(0);
 2493         }
 2494 
 2495         /*
 2496          * If the copy flag is set, copy this peripheral out.
 2497          */
 2498         if (retval & DM_RET_COPY) {
 2499                 int spaceleft, j;
 2500 
 2501                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2502                         sizeof(struct dev_match_result));
 2503 
 2504                 /*
 2505                  * If we don't have enough space to put in another
 2506                  * match result, save our position and tell the
 2507                  * user there are more devices to check.
 2508                  */
 2509                 if (spaceleft < sizeof(struct dev_match_result)) {
 2510                         struct periph_driver **pdrv;
 2511 
 2512                         pdrv = NULL;
 2513                         bzero(&cdm->pos, sizeof(cdm->pos));
 2514                         cdm->pos.position_type = 
 2515                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 2516                                 CAM_DEV_POS_PERIPH;
 2517 
 2518                         /*
 2519                          * This may look a bit nonsensical, but it is
 2520                          * actually quite logical.  There are very few
 2521                          * peripheral drivers, and bloating every peripheral
 2522                          * structure with a pointer back to its parent
 2523                          * peripheral driver linker set entry would cost
 2524                          * more in the long run than doing this quick lookup.
 2525                          */
 2526                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 2527                                 if (strcmp((*pdrv)->driver_name,
 2528                                     periph->periph_name) == 0)
 2529                                         break;
 2530                         }
 2531 
 2532                         if (*pdrv == NULL) {
 2533                                 cdm->status = CAM_DEV_MATCH_ERROR;
 2534                                 return(0);
 2535                         }
 2536 
 2537                         cdm->pos.cookie.pdrv = pdrv;
 2538                         /*
 2539                          * The periph generation slot does double duty, as
 2540                          * does the periph pointer slot.  They are used for
 2541                          * both edt and pdrv lookups and positioning.
 2542                          */
 2543                         cdm->pos.cookie.periph = periph;
 2544                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2545                                 (*pdrv)->generation;
 2546                         cdm->status = CAM_DEV_MATCH_MORE;
 2547                         return(0);
 2548                 }
 2549 
 2550                 j = cdm->num_matches;
 2551                 cdm->num_matches++;
 2552                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2553                 cdm->matches[j].result.periph_result.path_id =
 2554                         periph->path->bus->path_id;
 2555 
 2556                 /*
 2557                  * The transport layer peripheral doesn't have a target or
 2558                  * lun.
 2559                  */
 2560                 if (periph->path->target)
 2561                         cdm->matches[j].result.periph_result.target_id =
 2562                                 periph->path->target->target_id;
 2563                 else
 2564                         cdm->matches[j].result.periph_result.target_id = -1;
 2565 
 2566                 if (periph->path->device)
 2567                         cdm->matches[j].result.periph_result.target_lun =
 2568                                 periph->path->device->lun_id;
 2569                 else
 2570                         cdm->matches[j].result.periph_result.target_lun = -1;
 2571 
 2572                 cdm->matches[j].result.periph_result.unit_number =
 2573                         periph->unit_number;
 2574                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2575                         periph->periph_name, DEV_IDLEN);
 2576         }
 2577 
 2578         return(1);
 2579 }
 2580 
 2581 static int
 2582 xptperiphlistmatch(struct ccb_dev_match *cdm)
 2583 {
 2584         int ret;
 2585 
 2586         cdm->num_matches = 0;
 2587 
 2588         /*
 2589          * At this point in the edt traversal function, we check the bus
 2590          * list generation to make sure that no busses have been added or
 2591          * removed since the user last sent an XPT_DEV_MATCH ccb through.
 2592          * For the peripheral driver list traversal function, however, we
 2593          * don't have to worry about new peripheral driver types coming or
 2594          * going; they're in a linker set, and therefore can't change
 2595          * without a recompile.
 2596          */
 2597 
 2598         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2599          && (cdm->pos.cookie.pdrv != NULL))
 2600                 ret = xptpdrvtraverse(
 2601                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2602                                 xptplistpdrvfunc, cdm);
 2603         else
 2604                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2605 
 2606         /*
 2607          * If we get back 0, that means that we had to stop before fully
 2608          * traversing the peripheral driver tree.  It also means that one of
 2609          * the subroutines has set the status field to the proper value.  If
 2610          * we get back 1, we've fully traversed the peripheral driver
 2611          * list and copied out any matching entries.
 2612          */
 2613         if (ret == 1)
 2614                 cdm->status = CAM_DEV_MATCH_LAST;
 2615 
 2616         return(ret);
 2617 }
 2618 
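/*
 * Editorial summary (not part of the original file): the traversal
 * helpers below all follow one contract.  Each walks one level of the
 * EDT (busses, targets, devices, peripherals) or the peripheral driver
 * lists, optionally resuming from a saved starting element, and calls
 * tr_func on every entry.  A tr_func return of 1 means "keep going";
 * 0 aborts the walk and is propagated back to the original caller.
 */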
 2619 static int
 2620 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2621 {
 2622         struct cam_eb *bus, *next_bus;
 2623         int retval;
 2624 
 2625         retval = 1;
 2626 
 2627         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
 2628              bus != NULL;
 2629              bus = next_bus) {
 2630                 next_bus = TAILQ_NEXT(bus, links);
 2631 
 2632                 retval = tr_func(bus, arg);
 2633                 if (retval == 0)
 2634                         return(retval);
 2635         }
 2636 
 2637         return(retval);
 2638 }
 2639 
 2640 static int
 2641 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2642                   xpt_targetfunc_t *tr_func, void *arg)
 2643 {
 2644         struct cam_et *target, *next_target;
 2645         int retval;
 2646 
 2647         retval = 1;
 2648         for (target = (start_target ? start_target :
 2649                        TAILQ_FIRST(&bus->et_entries));
 2650              target != NULL; target = next_target) {
 2651 
 2652                 next_target = TAILQ_NEXT(target, links);
 2653 
 2654                 retval = tr_func(target, arg);
 2655 
 2656                 if (retval == 0)
 2657                         return(retval);
 2658         }
 2659 
 2660         return(retval);
 2661 }
 2662 
 2663 static int
 2664 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2665                   xpt_devicefunc_t *tr_func, void *arg)
 2666 {
 2667         struct cam_ed *device, *next_device;
 2668         int retval;
 2669 
 2670         retval = 1;
 2671         for (device = (start_device ? start_device :
 2672                        TAILQ_FIRST(&target->ed_entries));
 2673              device != NULL;
 2674              device = next_device) {
 2675 
 2676                 next_device = TAILQ_NEXT(device, links);
 2677 
 2678                 retval = tr_func(device, arg);
 2679 
 2680                 if (retval == 0)
 2681                         return(retval);
 2682         }
 2683 
 2684         return(retval);
 2685 }
 2686 
 2687 static int
 2688 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2689                   xpt_periphfunc_t *tr_func, void *arg)
 2690 {
 2691         struct cam_periph *periph, *next_periph;
 2692         int retval;
 2693 
 2694         retval = 1;
 2695 
 2696         for (periph = (start_periph ? start_periph :
 2697                        SLIST_FIRST(&device->periphs));
 2698              periph != NULL;
 2699              periph = next_periph) {
 2700 
 2701                 next_periph = SLIST_NEXT(periph, periph_links);
 2702 
 2703                 retval = tr_func(periph, arg);
 2704                 if (retval == 0)
 2705                         return(retval);
 2706         }
 2707 
 2708         return(retval);
 2709 }
 2710 
 2711 static int
 2712 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2713                 xpt_pdrvfunc_t *tr_func, void *arg)
 2714 {
 2715         struct periph_driver **pdrv;
 2716         int retval;
 2717 
 2718         retval = 1;
 2719 
 2720         /*
 2721          * We don't traverse the peripheral driver list like we do the
 2722          * other lists, because it is a linker set, and therefore cannot be
 2723          * changed during runtime.  If the peripheral driver list is ever
 2724          * re-done to be something other than a linker set (i.e. it can
 2725          * change while the system is running), the list traversal should
 2726          * be modified to work like the other traversal functions.
 2727          */
 2728         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2729              *pdrv != NULL; pdrv++) {
 2730                 retval = tr_func(pdrv, arg);
 2731 
 2732                 if (retval == 0)
 2733                         return(retval);
 2734         }
 2735 
 2736         return(retval);
 2737 }
 2738 
 2739 static int
 2740 xptpdperiphtraverse(struct periph_driver **pdrv,
 2741                     struct cam_periph *start_periph,
 2742                     xpt_periphfunc_t *tr_func, void *arg)
 2743 {
 2744         struct cam_periph *periph, *next_periph;
 2745         int retval;
 2746 
 2747         retval = 1;
 2748 
 2749         for (periph = (start_periph ? start_periph :
 2750              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2751              periph = next_periph) {
 2752 
 2753                 next_periph = TAILQ_NEXT(periph, unit_links);
 2754 
 2755                 retval = tr_func(periph, arg);
 2756                 if (retval == 0)
 2757                         return(retval);
 2758         }
 2759         return(retval);
 2760 }
 2761 
 2762 static int
 2763 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2764 {
 2765         struct xpt_traverse_config *tr_config;
 2766 
 2767         tr_config = (struct xpt_traverse_config *)arg;
 2768 
 2769         if (tr_config->depth == XPT_DEPTH_BUS) {
 2770                 xpt_busfunc_t *tr_func;
 2771 
 2772                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2773 
 2774                 return(tr_func(bus, tr_config->tr_arg));
 2775         } else
 2776                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2777 }
 2778 
 2779 static int
 2780 xptdeftargetfunc(struct cam_et *target, void *arg)
 2781 {
 2782         struct xpt_traverse_config *tr_config;
 2783 
 2784         tr_config = (struct xpt_traverse_config *)arg;
 2785 
 2786         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2787                 xpt_targetfunc_t *tr_func;
 2788 
 2789                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2790 
 2791                 return(tr_func(target, tr_config->tr_arg));
 2792         } else
 2793                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2794 }
 2795 
 2796 static int
 2797 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2798 {
 2799         struct xpt_traverse_config *tr_config;
 2800 
 2801         tr_config = (struct xpt_traverse_config *)arg;
 2802 
 2803         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2804                 xpt_devicefunc_t *tr_func;
 2805 
 2806                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2807 
 2808                 return(tr_func(device, tr_config->tr_arg));
 2809         } else
 2810                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2811 }
 2812 
 2813 static int
 2814 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2815 {
 2816         struct xpt_traverse_config *tr_config;
 2817         xpt_periphfunc_t *tr_func;
 2818 
 2819         tr_config = (struct xpt_traverse_config *)arg;
 2820 
 2821         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2822 
 2823         /*
 2824          * Unlike the other default functions, we don't check for depth
 2825          * here.  The peripheral driver level is the last level in the EDT,
 2826          * so if we're here, we should execute the function in question.
 2827          */
 2828         return(tr_func(periph, tr_config->tr_arg));
 2829 }
 2830 
 2831 /*
 2832  * Execute the given function for every bus in the EDT.
 2833  */
 2834 static int
 2835 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2836 {
 2837         struct xpt_traverse_config tr_config;
 2838 
 2839         tr_config.depth = XPT_DEPTH_BUS;
 2840         tr_config.tr_func = tr_func;
 2841         tr_config.tr_arg = arg;
 2842 
 2843         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2844 }
 2845 
 2846 #ifdef notusedyet
 2847 /*
 2848  * Execute the given function for every target in the EDT.
 2849  */
 2850 static int
 2851 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
 2852 {
 2853         struct xpt_traverse_config tr_config;
 2854 
 2855         tr_config.depth = XPT_DEPTH_TARGET;
 2856         tr_config.tr_func = tr_func;
 2857         tr_config.tr_arg = arg;
 2858 
 2859         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2860 }
 2861 #endif /* notusedyet */
 2862 
 2863 /*
 2864  * Execute the given function for every device in the EDT.
 2865  */
 2866 static int
 2867 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2868 {
 2869         struct xpt_traverse_config tr_config;
 2870 
 2871         tr_config.depth = XPT_DEPTH_DEVICE;
 2872         tr_config.tr_func = tr_func;
 2873         tr_config.tr_arg = arg;
 2874 
 2875         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2876 }
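
/*
 * Hypothetical example (not part of the original file): a tr_func
 * callback suitable for xpt_for_all_devices() above.  It counts the
 * configured devices in the EDT; the name and the #ifdef wrapper are
 * illustrative only, mirroring the file's "notusedyet" convention.
 */
#ifdef notusedyet
static int
xptcountdevfunc(struct cam_ed *device, void *arg)
{
        int *count;

        count = (int *)arg;
        if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
                (*count)++;

        return(1);      /* continue the traversal */
}

/* Usage:  int n = 0;  (void)xpt_for_all_devices(xptcountdevfunc, &n); */
#endif /* notusedyet */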
 2877 
 2878 #ifdef notusedyet
 2879 /*
 2880  * Execute the given function for every peripheral in the EDT.
 2881  */
 2882 static int
 2883 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
 2884 {
 2885         struct xpt_traverse_config tr_config;
 2886 
 2887         tr_config.depth = XPT_DEPTH_PERIPH;
 2888         tr_config.tr_func = tr_func;
 2889         tr_config.tr_arg = arg;
 2890 
 2891         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2892 }
 2893 #endif /* notusedyet */
 2894 
 2895 static int
 2896 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2897 {
 2898         struct cam_path path;
 2899         struct ccb_getdev cgd;
 2900         struct async_node *cur_entry;
 2901 
 2902         cur_entry = (struct async_node *)arg;
 2903 
 2904         /*
 2905          * Don't report unconfigured devices (Wildcard devs,
 2906          * devices only for target mode, device instances
 2907          * that have been invalidated but are waiting for
 2908          * their last reference count to be released).
 2909          */
 2910         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2911                 return (1);
 2912 
 2913         xpt_compile_path(&path,
 2914                          NULL,
 2915                          device->target->bus->path_id,
 2916                          device->target->target_id,
 2917                          device->lun_id);
 2918         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
 2919         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2920         xpt_action((union ccb *)&cgd);
 2921         cur_entry->callback(cur_entry->callback_arg,
 2922                             AC_FOUND_DEVICE,
 2923                             &path, &cgd);
 2924         xpt_release_path(&path);
 2925 
 2926         return(1);
 2927 }
 2928 
 2929 static int
 2930 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2931 {
 2932         struct cam_path path;
 2933         struct ccb_pathinq cpi;
 2934         struct async_node *cur_entry;
 2935 
 2936         cur_entry = (struct async_node *)arg;
 2937 
 2938         xpt_compile_path(&path, /*periph*/NULL,
 2939                          bus->sim->path_id,
 2940                          CAM_TARGET_WILDCARD,
 2941                          CAM_LUN_WILDCARD);
 2942         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 2943         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2944         xpt_action((union ccb *)&cpi);
 2945         cur_entry->callback(cur_entry->callback_arg,
 2946                             AC_PATH_REGISTERED,
 2947                             &path, &cpi);
 2948         xpt_release_path(&path);
 2949 
 2950         return(1);
 2951 }
 2952 
 2953 void
 2954 xpt_action(union ccb *start_ccb)
 2955 {
 2956         int iopl;
 2957 
 2958         GIANT_REQUIRED;
 2959 
 2960         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2961 
 2962         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2963 
 2964         iopl = splsoftcam();
 2965         switch (start_ccb->ccb_h.func_code) {
 2966         case XPT_SCSI_IO:
 2967         {
 2968 #ifdef CAM_NEW_TRAN_CODE
 2969                 struct cam_ed *device;
 2970 #endif /* CAM_NEW_TRAN_CODE */
 2971 #ifdef CAMDEBUG
 2972                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 2973                 struct cam_path *path;
 2974 
 2975                 path = start_ccb->ccb_h.path;
 2976 #endif
 2977 
 2978                 /*
 2979                  * For the sake of compatibility with SCSI-1
 2980                  * devices that may not understand the identify
 2981                  * message, we include lun information in the
 2982                  * second byte of all commands.  SCSI-1 specifies
 2983                  * that luns are a 3 bit value and reserves only 3
 2984                  * bits for lun information in the CDB.  Later
 2985                  * revisions of the SCSI spec allow for more than 8
 2986                  * luns, but have deprecated lun information in the
 2987                  * CDB.  So, if the lun won't fit, we must omit it.
 2988                  *
 2989                  * Also be aware that during initial probing for devices,
 2990                  * the inquiry information is unknown but initialized to 0.
 2991                  * This means that this code will be exercised while probing
 2992                  * devices with an ANSI revision greater than 2.
 2993                  */
 2994 #ifdef CAM_NEW_TRAN_CODE
 2995                 device = start_ccb->ccb_h.path->device;
 2996                 if (device->protocol_version <= SCSI_REV_2
 2997 #else /* CAM_NEW_TRAN_CODE */
 2998                 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
 2999 #endif /* CAM_NEW_TRAN_CODE */
 3000                  && start_ccb->ccb_h.target_lun < 8
 3001                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 3002 
 3003                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 3004                             start_ccb->ccb_h.target_lun << 5;
 3005                 }
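                /*
                 * Worked example (editorial, not in the original): for
                 * target_lun 2, the assignment above is equivalent to
                 * cdb_bytes[1] |= (2 << 5), i.e. |= 0x40, which puts the
                 * lun in bits 5-7 of CDB byte 1 as SCSI-1 expects.
                 */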
 3006                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 3007                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
 3008                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
 3009                                        &path->device->inq_data),
 3010                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
 3011                                           cdb_str, sizeof(cdb_str))));
 3012         }
 3013         /* FALLTHROUGH */
 3014         case XPT_TARGET_IO:
 3015         case XPT_CONT_TARGET_IO:
 3016                 start_ccb->csio.sense_resid = 0;
 3017                 start_ccb->csio.resid = 0;
 3018                 /* FALLTHROUGH */
 3019         case XPT_RESET_DEV:
 3020         case XPT_ENG_EXEC:
 3021         {
 3022                 struct cam_path *path;
 3023                 int s;
 3024                 int runq;
 3025 
 3026                 path = start_ccb->ccb_h.path;
 3027                 s = splsoftcam();
 3028 
 3029                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 3030                 if (path->device->qfrozen_cnt == 0)
 3031                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
 3032                 else
 3033                         runq = 0;
 3034                 splx(s);
 3035                 if (runq != 0)
 3036                         xpt_run_dev_sendq(path->bus);
 3037                 break;
 3038         }
 3039         case XPT_SET_TRAN_SETTINGS:
 3040         {
 3041                 xpt_set_transfer_settings(&start_ccb->cts,
 3042                                           start_ccb->ccb_h.path->device,
 3043                                           /*async_update*/FALSE);
 3044                 break;
 3045         }
 3046         case XPT_CALC_GEOMETRY:
 3047         {
 3048                 struct cam_sim *sim;
 3049 
 3050                 /* Filter out garbage */
 3051                 if (start_ccb->ccg.block_size == 0
 3052                  || start_ccb->ccg.volume_size == 0) {
 3053                         start_ccb->ccg.cylinders = 0;
 3054                         start_ccb->ccg.heads = 0;
 3055                         start_ccb->ccg.secs_per_track = 0;
 3056                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3057                         break;
 3058                 }
 3059 #ifdef PC98
 3060                 /*
 3061                  * In a PC-98 system, geometry translation depends on
 3062                  * the "real" device geometry obtained from mode page 4.
 3063                  * SCSI geometry translation is performed in the
 3064                  * initialization routine of the SCSI BIOS and the result
 3065                  * stored in host memory.  If the translation is available
 3066                  * in host memory, use it.  If not, rely on the default
 3067                  * translation the device driver performs.
 3068                  */
 3069                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 3070                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3071                         break;
 3072                 }
 3073 #endif
 3074                 sim = start_ccb->ccb_h.path->bus->sim;
 3075                 (*(sim->sim_action))(sim, start_ccb);
 3076                 break;
 3077         }
 3078         case XPT_ABORT:
 3079         {
 3080                 union ccb* abort_ccb;
 3081                 int s;                          
 3082 
 3083                 abort_ccb = start_ccb->cab.abort_ccb;
 3084                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 3085 
 3086                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 3087                                 struct cam_ccbq *ccbq;
 3088 
 3089                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
 3090                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
 3091                                 abort_ccb->ccb_h.status =
 3092                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 3093                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 3094                                 s = splcam();
 3095                                 xpt_done(abort_ccb);
 3096                                 splx(s);
 3097                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3098                                 break;
 3099                         }
 3100                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 3101                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 3102                                 /*
 3103                                  * We've caught this ccb en route to
 3104                                  * the SIM.  Flag it for abort and the
 3105                                  * SIM will do so just before starting
 3106                                  * real work on the CCB.
 3107                                  */
 3108                                 abort_ccb->ccb_h.status =
 3109                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 3110                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 3111                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3112                                 break;
 3113                         }
 3114                 } 
 3115                 if (XPT_FC_IS_QUEUED(abort_ccb)
 3116                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 3117                         /*
 3118                          * It's already completed but waiting
 3119                          * for our SWI to get to it.
 3120                          */
 3121                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 3122                         break;
 3123                 }
 3124                 /*
 3125                  * If we weren't able to take care of the abort request
 3126                  * in the XPT, pass the request down to the SIM for processing.
 3127                  */
 3128         }
 3129         /* FALLTHROUGH */
 3130         case XPT_ACCEPT_TARGET_IO:
 3131         case XPT_EN_LUN:
 3132         case XPT_IMMED_NOTIFY:
 3133         case XPT_NOTIFY_ACK:
 3134         case XPT_GET_TRAN_SETTINGS:
 3135         case XPT_RESET_BUS:
 3136         {
 3137                 struct cam_sim *sim;
 3138 
 3139                 sim = start_ccb->ccb_h.path->bus->sim;
 3140                 (*(sim->sim_action))(sim, start_ccb);
 3141                 break;
 3142         }
 3143         case XPT_PATH_INQ:
 3144         {
 3145                 struct cam_sim *sim;
 3146 
 3147                 sim = start_ccb->ccb_h.path->bus->sim;
 3148                 (*(sim->sim_action))(sim, start_ccb);
 3149                 break;
 3150         }
 3151         case XPT_PATH_STATS:
 3152                 start_ccb->cpis.last_reset =
 3153                         start_ccb->ccb_h.path->bus->last_reset;
 3154                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3155                 break;
 3156         case XPT_GDEV_TYPE:
 3157         {
 3158                 struct cam_ed *dev;
 3159                 int s;
 3160 
 3161                 dev = start_ccb->ccb_h.path->device;
 3162                 s = splcam();
 3163                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3164                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3165                 } else {
 3166                         struct ccb_getdev *cgd;
 3167                         struct cam_eb *bus;
 3168                         struct cam_et *tar;
 3169 
 3170                         cgd = &start_ccb->cgd;
 3171                         bus = cgd->ccb_h.path->bus;
 3172                         tar = cgd->ccb_h.path->target;
 3173                         cgd->inq_data = dev->inq_data;
 3174                         cgd->ccb_h.status = CAM_REQ_CMP;
 3175                         cgd->serial_num_len = dev->serial_num_len;
 3176                         if ((dev->serial_num_len > 0)
 3177                          && (dev->serial_num != NULL))
 3178                                 bcopy(dev->serial_num, cgd->serial_num,
 3179                                       dev->serial_num_len);
 3180                 }
 3181                 splx(s);
 3182                 break; 
 3183         }
 3184         case XPT_GDEV_STATS:
 3185         {
 3186                 struct cam_ed *dev;
 3187                 int s;
 3188 
 3189                 dev = start_ccb->ccb_h.path->device;
 3190                 s = splcam();
 3191                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3192                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3193                 } else {
 3194                         struct ccb_getdevstats *cgds;
 3195                         struct cam_eb *bus;
 3196                         struct cam_et *tar;
 3197 
 3198                         cgds = &start_ccb->cgds;
 3199                         bus = cgds->ccb_h.path->bus;
 3200                         tar = cgds->ccb_h.path->target;
 3201                         cgds->dev_openings = dev->ccbq.dev_openings;
 3202                         cgds->dev_active = dev->ccbq.dev_active;
 3203                         cgds->devq_openings = dev->ccbq.devq_openings;
 3204                         cgds->devq_queued = dev->ccbq.queue.entries;
 3205                         cgds->held = dev->ccbq.held;
 3206                         cgds->last_reset = tar->last_reset;
 3207                         cgds->maxtags = dev->quirk->maxtags;
 3208                         cgds->mintags = dev->quirk->mintags;
 3209                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 3210                                 cgds->last_reset = bus->last_reset;
 3211                         cgds->ccb_h.status = CAM_REQ_CMP;
 3212                 }
 3213                 splx(s);
 3214                 break;
 3215         }
 3216         case XPT_GDEVLIST:
 3217         {
 3218                 struct cam_periph       *nperiph;
 3219                 struct periph_list      *periph_head;
 3220                 struct ccb_getdevlist   *cgdl;
 3221                 u_int                   i;
 3222                 int                     s;
 3223                 struct cam_ed           *device;
 3224                 int                     found;
 3225 
 3226 
 3227                 found = 0;
 3228 
 3229                 /*
 3230                  * Don't want anyone mucking with our data.
 3231                  */
 3232                 s = splcam();
 3233                 device = start_ccb->ccb_h.path->device;
 3234                 periph_head = &device->periphs;
 3235                 cgdl = &start_ccb->cgdl;
 3236 
 3237                 /*
 3238                  * Check and see if the list has changed since the user
 3239                  * last requested a list member.  If so, tell them that the
 3240                  * list has changed, and therefore they need to start over 
 3241                  * from the beginning.
 3242                  */
 3243                 if ((cgdl->index != 0) && 
 3244                     (cgdl->generation != device->generation)) {
 3245                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 3246                         splx(s);
 3247                         break;
 3248                 }
 3249 
 3250                 /*
 3251                  * Traverse the list of peripherals and attempt to find 
 3252                  * the requested peripheral.
 3253                  */
 3254                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 3255                      (nperiph != NULL) && (i <= cgdl->index);
 3256                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 3257                         if (i == cgdl->index) {
 3258                                 strncpy(cgdl->periph_name,
 3259                                         nperiph->periph_name,
 3260                                         DEV_IDLEN);
 3261                                 cgdl->unit_number = nperiph->unit_number;
 3262                                 found = 1;
 3263                         }
 3264                 }
 3265                 if (found == 0) {
 3266                         cgdl->status = CAM_GDEVLIST_ERROR;
 3267                         splx(s);
 3268                         break;
 3269                 }
 3270 
 3271                 if (nperiph == NULL)
 3272                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 3273                 else
 3274                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 3275 
 3276                 cgdl->index++;
 3277                 cgdl->generation = device->generation;
 3278 
 3279                 splx(s);
 3280                 cgdl->ccb_h.status = CAM_REQ_CMP;
 3281                 break;
 3282         }
 3283         case XPT_DEV_MATCH:
 3284         {
 3285                 int s;
 3286                 dev_pos_type position_type;
 3287                 struct ccb_dev_match *cdm;
 3288 
 3289                 cdm = &start_ccb->cdm;
 3290 
 3291                 /*
 3292                  * Prevent EDT changes while we traverse it.
 3293                  */
 3294                 s = splcam();
 3295                 /*
 3296                  * There are two ways of getting at information in the EDT.
 3297                  * The first way is via the primary EDT tree.  It starts
 3298                  * with a list of busses, then a list of targets on a bus,
 3299                  * then devices/luns on a target, and then peripherals on a
 3300                  * device/lun.  The "other" way is by the peripheral driver
 3301                  * lists.  The peripheral driver lists are organized by
 3302                  * peripheral driver (obviously), so it makes sense to
 3303                  * use the peripheral driver list if the user is looking
 3304                  * for something like "da1", or all "da" devices.  If the
 3305                  * user is looking for something on a particular bus/target
 3306                  * or lun, it's generally better to go through the EDT tree.
 3307                  */
 3308 
 3309                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 3310                         position_type = cdm->pos.position_type;
 3311                 else {
 3312                         u_int i;
 3313 
 3314                         position_type = CAM_DEV_POS_NONE;
 3315 
 3316                         for (i = 0; i < cdm->num_patterns; i++) {
 3317                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 3318                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 3319                                         position_type = CAM_DEV_POS_EDT;
 3320                                         break;
 3321                                 }
 3322                         }
 3323 
 3324                         if (cdm->num_patterns == 0)
 3325                                 position_type = CAM_DEV_POS_EDT;
 3326                         else if (position_type == CAM_DEV_POS_NONE)
 3327                                 position_type = CAM_DEV_POS_PDRV;
 3328                 }
 3329 
 3330                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
 3331                 case CAM_DEV_POS_EDT:
 3332                         xptedtmatch(cdm);
 3333                         break;
 3334                 case CAM_DEV_POS_PDRV:
 3335                         xptperiphlistmatch(cdm);
 3336                         break;
 3337                 default:
 3338                         cdm->status = CAM_DEV_MATCH_ERROR;
 3339                         break;
 3340                 }
 3341 
 3342                 splx(s);
 3343 
 3344                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 3345                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 3346                 else
 3347                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3348 
 3349                 break;
 3350         }
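        /*
         * Hypothetical userland sketch (not part of this file): the
         * match machinery above is normally driven from userland via
         * the CAMIOCOMMAND ioctl on the xpt device, re-issuing the ccb
         * while the XPT reports CAM_DEV_MATCH_MORE so that cdm.pos
         * resumes the traversal where the previous buffer filled up:
         *
         *      union ccb ccb;
         *      struct dev_match_result matches[64];
         *
         *      bzero(&ccb, sizeof(ccb));
         *      ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
         *      ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
         *      ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
         *      ccb.ccb_h.func_code = XPT_DEV_MATCH;
         *      ccb.cdm.match_buf_len = sizeof(matches);
         *      ccb.cdm.matches = matches;
         *      ccb.cdm.num_patterns = 0;       (0 patterns == match all)
         *      do {
         *              if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
         *                      err(1, "CAMIOCOMMAND");
         *              ... consume ccb.cdm.num_matches results ...
         *      } while ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
         *            && ccb.cdm.status == CAM_DEV_MATCH_MORE);
         */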
 3351         case XPT_SASYNC_CB:
 3352         {
 3353                 struct ccb_setasync *csa;
 3354                 struct async_node *cur_entry;
 3355                 struct async_list *async_head;
 3356                 u_int32_t added;
 3357                 int s;
 3358 
 3359                 csa = &start_ccb->csa;
 3360                 added = csa->event_enable;
 3361                 async_head = &csa->ccb_h.path->device->asyncs;
 3362 
 3363                 /*
 3364                  * If there is already an entry for us, simply
 3365                  * update it.
 3366                  */
 3367                 s = splcam();
 3368                 cur_entry = SLIST_FIRST(async_head);
 3369                 while (cur_entry != NULL) {
 3370                         if ((cur_entry->callback_arg == csa->callback_arg)
 3371                          && (cur_entry->callback == csa->callback))
 3372                                 break;
 3373                         cur_entry = SLIST_NEXT(cur_entry, links);
 3374                 }
 3375 
 3376                 if (cur_entry != NULL) {
 3377                         /*
 3378                          * If the request has no flags set,
 3379                          * remove the entry.
 3380                          */
 3381                         added &= ~cur_entry->event_enable;
 3382                         if (csa->event_enable == 0) {
 3383                                 SLIST_REMOVE(async_head, cur_entry,
 3384                                              async_node, links);
 3385                                 csa->ccb_h.path->device->refcount--;
 3386                                 free(cur_entry, M_DEVBUF);
 3387                         } else {
 3388                                 cur_entry->event_enable = csa->event_enable;
 3389                         }
 3390                 } else {
 3391                         cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
 3392                                            M_NOWAIT);
 3393                         if (cur_entry == NULL) {
 3394                                 splx(s);
 3395                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 3396                                 break;
 3397                         }
 3398                         cur_entry->event_enable = csa->event_enable;
 3399                         cur_entry->callback_arg = csa->callback_arg;
 3400                         cur_entry->callback = csa->callback;
 3401                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 3402                         csa->ccb_h.path->device->refcount++;
 3403                 }
 3404 
 3405                 if ((added & AC_FOUND_DEVICE) != 0) {
 3406                         /*
 3407                          * Get this peripheral up to date with all
 3408                          * the currently existing devices.
 3409                          */
 3410                         xpt_for_all_devices(xptsetasyncfunc, cur_entry);
 3411                 }
 3412                 if ((added & AC_PATH_REGISTERED) != 0) {
 3413                         /*
 3414                          * Get this peripheral up to date with all
 3415                          * the currently existing busses.
 3416                          */
 3417                         xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
 3418                 }
 3419                 splx(s);
 3420                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3421                 break;
 3422         }
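        /*
         * Hypothetical registration sketch (not part of this file): a
         * peripheral driver typically signs up for the async callbacks
         * handled above with a sequence like the one below, and removes
         * itself later by re-issuing the ccb with event_enable == 0.
         * "exampleasync" stands in for the driver's ac_callback_t.
         *
         *      struct ccb_setasync csa;
         *
         *      xpt_setup_ccb(&csa.ccb_h, periph->path, 5);
         *      csa.ccb_h.func_code = XPT_SASYNC_CB;
         *      csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
         *      csa.callback = exampleasync;
         *      csa.callback_arg = periph;
         *      xpt_action((union ccb *)&csa);
         */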
 3423         case XPT_REL_SIMQ:
 3424         {
 3425                 struct ccb_relsim *crs;
 3426                 struct cam_ed *dev;
 3427                 int s;
 3428 
 3429                 crs = &start_ccb->crs;
 3430                 dev = crs->ccb_h.path->device;
 3431                 if (dev == NULL) {
 3432 
 3433                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 3434                         break;
 3435                 }
 3436 
 3437                 s = splcam();
 3438 
 3439                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 3440 
 3441                         if ((dev->inq_data.flags & SID_CmdQue) != 0) {
 3442 
 3443                                 /* Don't ever go below one opening */
 3444                                 if (crs->openings > 0) {
 3445                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
 3446                                                             crs->openings);
 3447 
 3448                                         if (bootverbose) {
 3449                                                 xpt_print_path(crs->ccb_h.path);
 3450                                                 printf("tagged openings "
 3451                                                        "now %d\n",
 3452                                                        crs->openings);
 3453                                         }
 3454                                 }
 3455                         }
 3456                 }
 3457 
 3458                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 3459 
 3460                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 3461 
 3462                                 /*
 3463                                  * Just extend the old timeout and decrement
 3464                                  * the freeze count so that a single timeout
 3465                                  * is sufficient for releasing the queue.
 3466                                  */
 3467                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3468                                 untimeout(xpt_release_devq_timeout,
 3469                                           dev, dev->c_handle);
 3470                         } else {
 3471 
 3472                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3473                         }
 3474 
 3475                         dev->c_handle =
 3476                                 timeout(xpt_release_devq_timeout,
 3477                                         dev,
 3478                                         (crs->release_timeout * hz) / 1000);
 3479 
 3480                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 3481 
 3482                 }
 3483 
 3484                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 3485 
 3486                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 3487                                 /*
 3488                                  * Decrement the freeze count so that a single
 3489                                  * completion is still sufficient to unfreeze
 3490                                  * the queue.
 3491                                  */
 3492                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3493                         } else {
 3494                                 
 3495                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 3496                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3497                         }
 3498                 }
 3499 
 3500                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 3501 
 3502                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 3503                          || (dev->ccbq.dev_active == 0)) {
 3504 
 3505                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3506                         } else {
 3507                                 
 3508                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 3509                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3510                         }
 3511                 }
 3512                 splx(s);
 3513                 
 3514                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 3515 
 3516                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
 3517                                          /*run_queue*/TRUE);
 3518                 }
 3519                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
 3520                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3521                 break;
 3522         }
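        /*
         * Hypothetical caller sketch (not part of this file): a periph
         * that wants to change its tagged opening count, as handled by
         * RELSIM_ADJUST_OPENINGS above, would issue something like the
         * following ("newopenings" is assumed to be a value > 0):
         *
         *      struct ccb_relsim crs;
         *
         *      xpt_setup_ccb(&crs.ccb_h, periph->path, 1);
         *      crs.ccb_h.func_code = XPT_REL_SIMQ;
         *      crs.release_flags = RELSIM_ADJUST_OPENINGS;
         *      crs.openings = newopenings;
         *      crs.release_timeout = 0;
         *      crs.qfrozen_cnt = 0;
         *      xpt_action((union ccb *)&crs);
         */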
 3523         case XPT_SCAN_BUS:
 3524                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
 3525                 break;
 3526         case XPT_SCAN_LUN:
 3527                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
 3528                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
 3529                              start_ccb);
 3530                 break;
 3531         case XPT_DEBUG: {
 3532 #ifdef CAMDEBUG
 3533                 int s;
 3534                 
 3535                 s = splcam();
 3536 #ifdef CAM_DEBUG_DELAY
 3537                 cam_debug_delay = CAM_DEBUG_DELAY;
 3538 #endif
 3539                 cam_dflags = start_ccb->cdbg.flags;
 3540                 if (cam_dpath != NULL) {
 3541                         xpt_free_path(cam_dpath);
 3542                         cam_dpath = NULL;
 3543                 }
 3544 
 3545                 if (cam_dflags != CAM_DEBUG_NONE) {
 3546                         if (xpt_create_path(&cam_dpath, xpt_periph,
 3547                                             start_ccb->ccb_h.path_id,
 3548                                             start_ccb->ccb_h.target_id,
 3549                                             start_ccb->ccb_h.target_lun) !=
 3550                                             CAM_REQ_CMP) {
 3551                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3552                                 cam_dflags = CAM_DEBUG_NONE;
 3553                         } else {
 3554                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3555                                 xpt_print_path(cam_dpath);
 3556                                 printf("debugging flags now %x\n", cam_dflags);
 3557                         }
 3558                 } else {
 3559                         cam_dpath = NULL;
 3560                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3561                 }
 3562                 splx(s);
 3563 #else /* !CAMDEBUG */
 3564                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 3565 #endif /* CAMDEBUG */
 3566                 break;
 3567         }
 3568         case XPT_NOOP:
 3569                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 3570                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
 3571                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3572                 break;
 3573         default:
 3574         case XPT_SDEV_TYPE:
 3575         case XPT_TERM_IO:
 3576         case XPT_ENG_INQ:
 3577                 /* XXX Implement */
 3578                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 3579                 break;
 3580         }
 3581         splx(iopl);
 3582 }
 3583 
 3584 void
 3585 xpt_polled_action(union ccb *start_ccb)
 3586 {
 3587         int       s;
 3588         u_int32_t timeout;
 3589         struct    cam_sim *sim; 
 3590         struct    cam_devq *devq;
 3591         struct    cam_ed *dev;
 3592 
 3593         GIANT_REQUIRED;
 3594 
 3595         timeout = start_ccb->ccb_h.timeout;
 3596         sim = start_ccb->ccb_h.path->bus->sim;
 3597         devq = sim->devq;
 3598         dev = start_ccb->ccb_h.path->device;
 3599 
 3600         s = splcam();
 3601 
 3602         /*
 3603          * Steal an opening so that no other queued requests
 3604          * can get it before us while we simulate interrupts.
 3605          */
 3606         dev->ccbq.devq_openings--;
 3607         dev->ccbq.dev_openings--;       
 3608         
 3609         while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
 3610            && (--timeout > 0)) {
 3611                 DELAY(1000);
 3612                 (*(sim->sim_poll))(sim);
 3613                 camisr(&cam_netq);
 3614                 camisr(&cam_bioq);
 3615         }
 3616         
 3617         dev->ccbq.devq_openings++;
 3618         dev->ccbq.dev_openings++;
 3619         
 3620         if (timeout != 0) {
 3621                 xpt_action(start_ccb);
 3622                 while(--timeout > 0) {
 3623                         (*(sim->sim_poll))(sim);
 3624                         camisr(&cam_netq);
 3625                         camisr(&cam_bioq);
 3626                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 3627                             != CAM_REQ_INPROG)
 3628                                 break;
 3629                         DELAY(1000);
 3630                 }
 3631                 if (timeout == 0) {
 3632                         /*
 3633                          * XXX Is it worth adding a sim_timeout entry
 3634                          * point so we can attempt recovery?  If
 3635                          * this is only used for dumps, I don't think
 3636                          * it is.
 3637                          */
 3638                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3639                 }
 3640         } else {
 3641                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3642         }
 3643         splx(s);
 3644 }
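
/*
 * Hypothetical usage sketch (not part of the original file): polled
 * actions are meant for contexts such as crash dumps where interrupt
 * driven completion is unavailable.  A minimal polled TEST UNIT READY
 * against an already configured path might look like the function
 * below; the name and the #ifdef wrapper are illustrative only.
 */
#ifdef notusedyet
static void
xptexamplepolledtur(struct cam_path *path)
{
        union ccb ccb;

        xpt_setup_ccb(&ccb.ccb_h, path, /*priority*/1);
        scsi_test_unit_ready(&ccb.csio,
                             /*retries*/0,
                             /*cbfcnp*/NULL,
                             MSG_SIMPLE_Q_TAG,
                             SSD_FULL_SIZE,
                             /*timeout*/5000);
        xpt_polled_action(&ccb);
        if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
                printf("xptexamplepolledtur: status %#x\n",
                       ccb.ccb_h.status);
}
#endif /* notusedyet */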
 3645         
 3646 /*
 3647  * Schedule a peripheral driver to receive a ccb when its
 3648  * target device has space for more transactions.
 3649  */
 3650 void
 3651 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 3652 {
 3653         struct cam_ed *device;
 3654         int s;
 3655         int runq;
 3656 
 3657         GIANT_REQUIRED;
 3658 
 3659         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3660         device = perph->path->device;
 3661         s = splsoftcam();
 3662         if (periph_is_queued(perph)) {
 3663                 /* Simply reorder based on new priority */
 3664                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3665                           ("   change priority to %d\n", new_priority));
 3666                 if (new_priority < perph->pinfo.priority) {
 3667                         camq_change_priority(&device->drvq,
 3668                                              perph->pinfo.index,
 3669                                              new_priority);
 3670                 }
 3671                 runq = 0;
 3672         } else {
 3673                 /* New entry on the queue */
 3674                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3675                           ("   added periph to queue\n"));
 3676                 perph->pinfo.priority = new_priority;
 3677                 perph->pinfo.generation = ++device->drvq.generation;
 3678                 camq_insert(&device->drvq, &perph->pinfo);
 3679                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3680         }
 3681         splx(s);
 3682         if (runq != 0) {
 3683                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3684                           ("   calling xpt_run_dev_allocq\n"));
 3685                 xpt_run_dev_allocq(perph->path->bus);
 3686         }
 3687 }
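
/*
 * Editorial flow sketch (not part of the original file): a typical
 * peripheral uses xpt_schedule() as the second step of the allocation
 * dance driven by xpt_run_dev_allocq() below:
 *
 *      1. The driver's I/O entry point queues a request on its own
 *         queue and calls xpt_schedule(periph, priority).
 *      2. When an opening and a ccb are available, the XPT invokes the
 *         driver's periph_start(periph, work_ccb) routine.
 *      3. periph_start() fills the ccb and hands it to xpt_action().
 */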
 3688 
 3689 
 3690 /*
 3691  * Schedule a device to run on a given queue.
 3692  * If the device was inserted as a new entry on the queue,
 3693  * return 1 meaning the device queue should be run. If we
 3694  * were already queued, implying someone else has already
 3695  * started the queue, return 0 so the caller doesn't attempt
 3696          * to run the queue.  Must be run at splsoftcam (or splcam,
 3697          * since that encompasses splsoftcam).
 3698  */
 3699 static int
 3700 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3701                  u_int32_t new_priority)
 3702 {
 3703         int retval;
 3704         u_int32_t old_priority;
 3705 
 3706         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3707 
 3708         old_priority = pinfo->priority;
 3709 
 3710         /*
 3711          * Are we already queued?
 3712          */
 3713         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3714                 /* Simply reorder based on new priority */
 3715                 if (new_priority < old_priority) {
 3716                         camq_change_priority(queue, pinfo->index,
 3717                                              new_priority);
 3718                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3719                                         ("changed priority to %d\n",
 3720                                          new_priority));
 3721                 }
 3722                 retval = 0;
 3723         } else {
 3724                 /* New entry on the queue */
 3725                 if (new_priority < old_priority)
 3726                         pinfo->priority = new_priority;
 3727 
 3728                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3729                                 ("Inserting onto queue\n"));
 3730                 pinfo->generation = ++queue->generation;
 3731                 camq_insert(queue, pinfo);
 3732                 retval = 1;
 3733         }
 3734         return (retval);
 3735 }
 3736 
 3737 static void
 3738 xpt_run_dev_allocq(struct cam_eb *bus)
 3739 {
 3740         struct  cam_devq *devq;
 3741         int     s;
 3742 
 3743         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3744         devq = bus->sim->devq;
 3745 
 3746         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3747                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3748                          "openings == %d, active == %d\n",
 3749                          devq->alloc_queue.qfrozen_cnt,
 3750                          devq->alloc_queue.entries,
 3751                          devq->alloc_openings,
 3752                          devq->alloc_active));
 3753 
 3754         s = splsoftcam();
 3755         devq->alloc_queue.qfrozen_cnt++;
 3756         while ((devq->alloc_queue.entries > 0)
 3757             && (devq->alloc_openings > 0)
 3758             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
 3759                 struct  cam_ed_qinfo *qinfo;
 3760                 struct  cam_ed *device;
 3761                 union   ccb *work_ccb;
 3762                 struct  cam_periph *drv;
 3763                 struct  camq *drvq;
 3764                 
 3765                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3766                                                            CAMQ_HEAD);
 3767                 device = qinfo->device;
 3768 
 3769                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3770                                 ("running device %p\n", device));
 3771 
 3772                 drvq = &device->drvq;
 3773 
 3774 #ifdef CAMDEBUG
 3775                 if (drvq->entries <= 0) {
 3776                         panic("xpt_run_dev_allocq: "
 3777                               "Device on queue without any work to do");
 3778                 }
 3779 #endif
 3780                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3781                         devq->alloc_openings--;
 3782                         devq->alloc_active++;
 3783                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3784                         splx(s);
 3785                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3786                                       drv->pinfo.priority);
 3787                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3788                                         ("calling periph start\n"));
 3789                         drv->periph_start(drv, work_ccb);
 3790                 } else {
 3791                         /*
 3792                          * Malloc failure in alloc_ccb
 3793                          */
 3794                         /*
 3795                          * XXX add us to a list to be run from free_ccb
 3796                          * if we don't have any ccbs active on this
 3797                          * device queue otherwise we may never get run
 3798                          * again.
 3799                          */
 3800                         break;
 3801                 }
 3802         
 3803                 /* Raise IPL for possible insertion and test at top of loop */
 3804                 s = splsoftcam();
 3805 
 3806                 if (drvq->entries > 0) {
 3807                         /* We have more work.  Attempt to reschedule */
 3808                         xpt_schedule_dev_allocq(bus, device);
 3809                 }
 3810         }
 3811         devq->alloc_queue.qfrozen_cnt--;
 3812         splx(s);
 3813 }
 3814 
 3815 static void
 3816 xpt_run_dev_sendq(struct cam_eb *bus)
 3817 {
 3818         struct  cam_devq *devq;
 3819         int     s;
 3820 
 3821         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3822         
 3823         devq = bus->sim->devq;
 3824 
 3825         s = splcam();
 3826         devq->send_queue.qfrozen_cnt++;
 3827         splx(s);
 3828         s = splsoftcam();
 3829         while ((devq->send_queue.entries > 0)
 3830             && (devq->send_openings > 0)) {
 3831                 struct  cam_ed_qinfo *qinfo;
 3832                 struct  cam_ed *device;
 3833                 union ccb *work_ccb;
 3834                 struct  cam_sim *sim;
 3835                 int     ospl;
 3836 
 3837                 ospl = splcam();
 3838                 if (devq->send_queue.qfrozen_cnt > 1) {
 3839                         splx(ospl);
 3840                         break;
 3841                 }
 3842 
 3843                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3844                                                            CAMQ_HEAD);
 3845                 device = qinfo->device;
 3846 
 3847                 /*
 3848                  * If the device has been "frozen", don't attempt
 3849                  * to run it.
 3850                  */
 3851                 if (device->qfrozen_cnt > 0) {
 3852                         splx(ospl);
 3853                         continue;
 3854                 }
 3855 
 3856                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3857                                 ("running device %p\n", device));
 3858 
 3859                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3860                 if (work_ccb == NULL) {
 3861                         printf("device on run queue with no ccbs???\n");
 3862                         splx(ospl);
 3863                         continue;
 3864                 }
 3865 
 3866                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3867 
 3868                         if (num_highpower <= 0) {
 3869                                 /*
 3870                                  * We got a high power command, but we
 3871                                  * don't have any available slots.  Freeze
 3872                                  * the device queue until we have a slot
 3873                                  * available.
 3874                                  */
 3875                                 device->qfrozen_cnt++;
 3876                                 STAILQ_INSERT_TAIL(&highpowerq, 
 3877                                                    &work_ccb->ccb_h, 
 3878                                                    xpt_links.stqe);
 3879 
 3880                                 splx(ospl);
 3881                                 continue;
 3882                         } else {
 3883                                 /*
 3884                                  * Consume a high power slot while
 3885                                  * this ccb runs.
 3886                                  */
 3887                                 num_highpower--;
 3888                         }
 3889                 }
 3890                 devq->active_dev = device;
 3891                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3892 
 3893                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3894                 splx(ospl);
 3895 
 3896                 devq->send_openings--;
 3897                 devq->send_active++;            
 3898                 
 3899                 if (device->ccbq.queue.entries > 0)
 3900                         xpt_schedule_dev_sendq(bus, device);
 3901 
 3902                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
 3903                         /*
 3904                          * The client wants to freeze the queue
 3905                          * after this CCB is sent.
 3906                          */
 3907                         ospl = splcam();
 3908                         device->qfrozen_cnt++;
 3909                         splx(ospl);
 3910                 }
 3911                 
 3912                 splx(s);
 3913 
 3914                 /* In Target mode, the peripheral driver knows best... */
 3915                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3916                         if ((device->inq_flags & SID_CmdQue) != 0
 3917                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3918                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3919                         else
 3920                                 /*
 3921                                  * Clear this in case of a retried CCB that
 3922                                  * failed due to a rejected tag.
 3923                                  */
 3924                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3925                 }
 3926 
 3927                 /*
 3928                  * Device queues can be shared among multiple sim instances
 3929                  * that reside on different busses.  Use the SIM in the queue
 3930                  * CCB's path, rather than the one in the bus that was passed
 3931                  * into this function.
 3932                  */
 3933                 sim = work_ccb->ccb_h.path->bus->sim;
 3934                 (*(sim->sim_action))(sim, work_ccb);
 3935 
 3936                 ospl = splcam();
 3937                 devq->active_dev = NULL;
 3938                 splx(ospl);
 3939                 /* Raise IPL for possible insertion and test at top of loop */
 3940                 s = splsoftcam();
 3941         }
 3942         splx(s);
 3943         s = splcam();
 3944         devq->send_queue.qfrozen_cnt--;
 3945         splx(s);
 3946 }
 3947 
 3948 /*
 3949  * This function merges the request payload from the slave ccb into the
 3950  * master ccb, while keeping important header fields in the master ccb constant.
 3951  */
 3952 void
 3953 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3954 {
 3955         GIANT_REQUIRED;
 3956 
 3957         /*
 3958          * Pull fields that are valid for peripheral drivers to set
 3959          * into the master CCB along with the CCB "payload".
 3960          */
 3961         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3962         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3963         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3964         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 3965         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3966               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3967 }
 3968 
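/*
 * Editor's sketch (not part of the original file): a typical consumer, such
 * as a pass-through style driver, merges a caller-supplied CCB into one that
 * already carries a valid kernel path, so the path and priority set up by
 * xpt_setup_ccb() survive the merge.  "user_ccb" and "path" are hypothetical.
 */
#if 0
	union ccb *ccb;

	ccb = xpt_alloc_ccb();
	xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/1);
	/* Payload and flags come from the user; path and priority from us. */
	xpt_merge_ccb(ccb, user_ccb);
	xpt_action(ccb);
#endif
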
 3969 void
 3970 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3971 {
 3972         GIANT_REQUIRED;
 3973 
 3974         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3975         ccb_h->pinfo.priority = priority;
 3976         ccb_h->path = path;
 3977         ccb_h->path_id = path->bus->path_id;
 3978         if (path->target)
 3979                 ccb_h->target_id = path->target->target_id;
 3980         else
 3981                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3982         if (path->device) {
 3983                 ccb_h->target_lun = path->device->lun_id;
 3984                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3985         } else {
 3986                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3987         }
 3988         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3989         ccb_h->flags = 0;
 3990 }
 3991 
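/*
 * Editor's sketch (not part of the original file): the usual pattern is to
 * initialize a CCB header with xpt_setup_ccb(), set a function code, and
 * hand the CCB to xpt_action(), as xpt_bus_register() does below with
 * XPT_PATH_INQ.
 */
#if 0
	struct ccb_pathinq cpi;

	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status == CAM_REQ_CMP)
		printf("bus supports target ids 0-%d\n", cpi.max_target);
#endif
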
 3992 /* Path manipulation functions */
 3993 cam_status
 3994 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3995                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3996 {
 3997         struct     cam_path *path;
 3998         cam_status status;
 3999 
 4000         GIANT_REQUIRED;
 4001 
 4002         path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
 4003 
 4004         if (path == NULL) {
 4005                 status = CAM_RESRC_UNAVAIL;
 4006                 return(status);
 4007         }
 4008         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 4009         if (status != CAM_REQ_CMP) {
 4010                 free(path, M_DEVBUF);
 4011                 path = NULL;
 4012         }
 4013         *new_path_ptr = path;
 4014         return (status);
 4015 }
 4016 
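/*
 * Editor's sketch (not part of the original file): callers own the path
 * returned by xpt_create_path() and must release it with xpt_free_path().
 * The wildcarded ids below are placeholders.
 */
#if 0
static void
example_path_usage(path_id_t path_id)
{
	struct cam_path *path;

	if (xpt_create_path(&path, /*periph*/NULL, path_id,
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
		return;
	/* ... issue CCBs against the path with xpt_setup_ccb()/xpt_action() ... */
	xpt_free_path(path);
}
#endif
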
 4017 static cam_status
 4018 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 4019                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 4020 {
 4021         struct       cam_eb *bus;
 4022         struct       cam_et *target;
 4023         struct       cam_ed *device;
 4024         cam_status   status;
 4025         int          s;
 4026 
 4027         status = CAM_REQ_CMP;   /* Completed without error */
 4028         target = NULL;          /* Wildcarded */
 4029         device = NULL;          /* Wildcarded */
 4030 
 4031         /*
 4032          * We will potentially modify the EDT, so block interrupts
 4033          * that may attempt to create cam paths.
 4034          */
 4035         s = splcam();
 4036         bus = xpt_find_bus(path_id);
 4037         if (bus == NULL) {
 4038                 status = CAM_PATH_INVALID;
 4039         } else {
 4040                 target = xpt_find_target(bus, target_id);
 4041                 if (target == NULL) {
 4042                         /* Create one */
 4043                         struct cam_et *new_target;
 4044 
 4045                         new_target = xpt_alloc_target(bus, target_id);
 4046                         if (new_target == NULL) {
 4047                                 status = CAM_RESRC_UNAVAIL;
 4048                         } else {
 4049                                 target = new_target;
 4050                         }
 4051                 }
 4052                 if (target != NULL) {
 4053                         device = xpt_find_device(target, lun_id);
 4054                         if (device == NULL) {
 4055                                 /* Create one */
 4056                                 struct cam_ed *new_device;
 4057 
 4058                                 new_device = xpt_alloc_device(bus,
 4059                                                               target,
 4060                                                               lun_id);
 4061                                 if (new_device == NULL) {
 4062                                         status = CAM_RESRC_UNAVAIL;
 4063                                 } else {
 4064                                         device = new_device;
 4065                                 }
 4066                         }
 4067                 }
 4068         }
 4069         splx(s);
 4070 
 4071         /*
 4072          * Only touch the user's data if we are successful.
 4073          */
 4074         if (status == CAM_REQ_CMP) {
 4075                 new_path->periph = perph;
 4076                 new_path->bus = bus;
 4077                 new_path->target = target;
 4078                 new_path->device = device;
 4079                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 4080         } else {
 4081                 if (device != NULL)
 4082                         xpt_release_device(bus, target, device);
 4083                 if (target != NULL)
 4084                         xpt_release_target(bus, target);
 4085                 if (bus != NULL)
 4086                         xpt_release_bus(bus);
 4087         }
 4088         return (status);
 4089 }
 4090 
 4091 static void
 4092 xpt_release_path(struct cam_path *path)
 4093 {
 4094         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 4095         if (path->device != NULL) {
 4096                 xpt_release_device(path->bus, path->target, path->device);
 4097                 path->device = NULL;
 4098         }
 4099         if (path->target != NULL) {
 4100                 xpt_release_target(path->bus, path->target);
 4101                 path->target = NULL;
 4102         }
 4103         if (path->bus != NULL) {
 4104                 xpt_release_bus(path->bus);
 4105                 path->bus = NULL;
 4106         }
 4107 }
 4108 
 4109 void
 4110 xpt_free_path(struct cam_path *path)
 4111 {
 4112         GIANT_REQUIRED;
 4113 
 4114         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 4115         xpt_release_path(path);
 4116         free(path, M_DEVBUF);
 4117 }
 4118 
 4119 
 4120 /*
 4121  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 4122  * in path1, 2 for match with wildcards in path2.
 4123  */
 4124 int
 4125 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 4126 {
 4127         GIANT_REQUIRED;
 4128 
 4129         int retval = 0;
 4130 
 4131         if (path1->bus != path2->bus) {
 4132                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 4133                         retval = 1;
 4134                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 4135                         retval = 2;
 4136                 else
 4137                         return (-1);
 4138         }
 4139         if (path1->target != path2->target) {
 4140                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 4141                         if (retval == 0)
 4142                                 retval = 1;
 4143                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 4144                         retval = 2;
 4145                 else
 4146                         return (-1);
 4147         }
 4148         if (path1->device != path2->device) {
 4149                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 4150                         if (retval == 0)
 4151                                 retval = 1;
 4152                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 4153                         retval = 2;
 4154                 else
 4155                         return (-1);
 4156         }
 4157         return (retval);
 4158 }
 4159 
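/*
 * Editor's sketch (not part of the original file): most consumers only care
 * whether two paths overlap, treating any non-negative return as an exact or
 * wildcarded match per the convention documented above.
 */
#if 0
static __inline int
example_paths_overlap(struct cam_path *a, struct cam_path *b)
{
	/* -1 == disjoint; 0, 1, or 2 == exact or wildcard match. */
	return (xpt_path_comp(a, b) >= 0);
}
#endif
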
 4160 void
 4161 xpt_print_path(struct cam_path *path)
 4162 {
 4163         GIANT_REQUIRED;
 4164 
 4165         if (path == NULL)
 4166                 printf("(nopath): ");
 4167         else {
 4168                 if (path->periph != NULL)
 4169                         printf("(%s%d:", path->periph->periph_name,
 4170                                path->periph->unit_number);
 4171                 else
 4172                         printf("(noperiph:");
 4173 
 4174                 if (path->bus != NULL)
 4175                         printf("%s%d:%d:", path->bus->sim->sim_name,
 4176                                path->bus->sim->unit_number,
 4177                                path->bus->sim->bus_id);
 4178                 else
 4179                         printf("nobus:");
 4180 
 4181                 if (path->target != NULL)
 4182                         printf("%d:", path->target->target_id);
 4183                 else
 4184                         printf("X:");
 4185 
 4186                 if (path->device != NULL)
 4187                         printf("%d): ", path->device->lun_id);
 4188                 else
 4189                         printf("X): ");
 4190         }
 4191 }
 4192 
 4193 int
 4194 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 4195 {
 4196         struct sbuf sb;
 4197 
 4198         GIANT_REQUIRED;
 4199 
 4200         sbuf_new(&sb, str, str_len, 0);
 4201 
 4202         if (path == NULL)
 4203                 sbuf_printf(&sb, "(nopath): ");
 4204         else {
 4205                 if (path->periph != NULL)
 4206                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 4207                                     path->periph->unit_number);
 4208                 else
 4209                         sbuf_printf(&sb, "(noperiph:");
 4210 
 4211                 if (path->bus != NULL)
 4212                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 4213                                     path->bus->sim->unit_number,
 4214                                     path->bus->sim->bus_id);
 4215                 else
 4216                         sbuf_printf(&sb, "nobus:");
 4217 
 4218                 if (path->target != NULL)
 4219                         sbuf_printf(&sb, "%d:", path->target->target_id);
 4220                 else
 4221                         sbuf_printf(&sb, "X:");
 4222 
 4223                 if (path->device != NULL)
 4224                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
 4225                 else
 4226                         sbuf_printf(&sb, "X): ");
 4227         }
 4228         sbuf_finish(&sb);
 4229 
 4230         return(sbuf_len(&sb));
 4231 }
 4232 
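/*
 * Editor's sketch (not part of the original file): xpt_path_string() is the
 * buffer-based analogue of xpt_print_path() above, for callers that need the
 * "(periph:bus:target:lun): " prefix in their own messages.
 */
#if 0
static void
example_log_path(struct cam_path *path)
{
	char buf[64];

	xpt_path_string(path, buf, sizeof(buf));
	printf("%sexample event\n", buf);
}
#endif
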
 4233 path_id_t
 4234 xpt_path_path_id(struct cam_path *path)
 4235 {
 4236         GIANT_REQUIRED;
 4237 
 4238         return(path->bus->path_id);
 4239 }
 4240 
 4241 target_id_t
 4242 xpt_path_target_id(struct cam_path *path)
 4243 {
 4244         GIANT_REQUIRED;
 4245 
 4246         if (path->target != NULL)
 4247                 return (path->target->target_id);
 4248         else
 4249                 return (CAM_TARGET_WILDCARD);
 4250 }
 4251 
 4252 lun_id_t
 4253 xpt_path_lun_id(struct cam_path *path)
 4254 {
 4255         GIANT_REQUIRED;
 4256 
 4257         if (path->device != NULL)
 4258                 return (path->device->lun_id);
 4259         else
 4260                 return (CAM_LUN_WILDCARD);
 4261 }
 4262 
 4263 struct cam_sim *
 4264 xpt_path_sim(struct cam_path *path)
 4265 {
 4266         GIANT_REQUIRED;
 4267 
 4268         return (path->bus->sim);
 4269 }
 4270 
 4271 struct cam_periph*
 4272 xpt_path_periph(struct cam_path *path)
 4273 {
 4274         GIANT_REQUIRED;
 4275 
 4276         return (path->periph);
 4277 }
 4278 
 4279 /*
 4280  * Release a CAM control block for the caller.  Remit the cost of the structure
 4281  * to the device referenced by the path.  If this device had no 'credits'
 4282  * and peripheral drivers have registered async callbacks for this
 4283  * notification, call them now.
 4284  */
 4285 void
 4286 xpt_release_ccb(union ccb *free_ccb)
 4287 {
 4288         int      s;
 4289         struct   cam_path *path;
 4290         struct   cam_ed *device;
 4291         struct   cam_eb *bus;
 4292 
 4293         GIANT_REQUIRED;
 4294 
 4295         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 4296         path = free_ccb->ccb_h.path;
 4297         device = path->device;
 4298         bus = path->bus;
 4299         s = splsoftcam();
 4300         cam_ccbq_release_opening(&device->ccbq);
 4301         if (xpt_ccb_count > xpt_max_ccbs) {
 4302                 xpt_free_ccb(free_ccb);
 4303                 xpt_ccb_count--;
 4304         } else {
 4305                 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
 4306         }
 4307         bus->sim->devq->alloc_openings++;
 4308         bus->sim->devq->alloc_active--;
 4309         /* XXX Turn this into an inline function - xpt_run_device?? */
 4310         if ((device_is_alloc_queued(device) == 0)
 4311          && (device->drvq.entries > 0)) {
 4312                 xpt_schedule_dev_allocq(bus, device);
 4313         }
 4314         splx(s);
 4315         if (dev_allocq_is_runnable(bus->sim->devq))
 4316                 xpt_run_dev_allocq(bus);
 4317 }
 4318 
 4319 /* Functions accessed by SIM drivers */
 4320 
 4321 /*
 4322  * A sim structure, listing the SIM entry points and instance
 4323  * identification info is passed to xpt_bus_register to hook the SIM
 4324  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 4325  * for this new bus and places it in the array of busses and assigns
 4326  * it a path_id.  The path_id may be influenced by "hard wiring"
 4327  * information specified by the user.  Once interrupt services are
 4328  * available, the bus will be probed.
 4329  */
 4330 int32_t
 4331 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
 4332 {
 4333         struct cam_eb *new_bus;
 4334         struct cam_eb *old_bus;
 4335         struct ccb_pathinq cpi;
 4336         int s;
 4337 
 4338         GIANT_REQUIRED;
 4339 
 4340         sim->bus_id = bus;
 4341         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 4342                                           M_DEVBUF, M_NOWAIT);
 4343         if (new_bus == NULL) {
 4344                 /* Couldn't satisfy request */
 4345                 return (CAM_RESRC_UNAVAIL);
 4346         }
 4347 
 4348         if (strcmp(sim->sim_name, "xpt") != 0) {
 4349 
 4350                 sim->path_id =
 4351                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 4352         }
 4353 
 4354         TAILQ_INIT(&new_bus->et_entries);
 4355         new_bus->path_id = sim->path_id;
 4356         new_bus->sim = sim;
 4357         timevalclear(&new_bus->last_reset);
 4358         new_bus->flags = 0;
 4359         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 4360         new_bus->generation = 0;
 4361         s = splcam();
 4362         old_bus = TAILQ_FIRST(&xpt_busses);
 4363         while (old_bus != NULL
 4364             && old_bus->path_id < new_bus->path_id)
 4365                 old_bus = TAILQ_NEXT(old_bus, links);
 4366         if (old_bus != NULL)
 4367                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 4368         else
 4369                 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
 4370         bus_generation++;
 4371         splx(s);
 4372 
 4373         /* Notify interested parties */
 4374         if (sim->path_id != CAM_XPT_PATH_ID) {
 4375                 struct cam_path path;
 4376 
 4377                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
 4378                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4379                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 4380                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 4381                 xpt_action((union ccb *)&cpi);
 4382                 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
 4383                 xpt_release_path(&path);
 4384         }
 4385         return (CAM_SUCCESS);
 4386 }
 4387 
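/*
 * Editor's sketch (not part of the original file): a SIM driver's attach
 * routine typically allocates a devq and a sim, then hooks into CAM with
 * xpt_bus_register().  mysim_action/mysim_poll and the softc are
 * hypothetical; the CAM calls match the FreeBSD 5.x API.
 */
#if 0
static int
example_attach(void *softc)
{
	struct cam_devq *devq;
	struct cam_sim *sim;

	if ((devq = cam_simq_alloc(/*max_sim_transactions*/32)) == NULL)
		return (ENOMEM);
	sim = cam_sim_alloc(mysim_action, mysim_poll, "mysim", softc,
			    /*unit*/0, /*max_dev_transactions*/1,
			    /*max_tagged_dev_transactions*/32, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return (ENOMEM);
	}
	if (xpt_bus_register(sim, /*bus*/0) != CAM_SUCCESS) {
		cam_sim_free(sim, /*free_devq*/TRUE);
		return (ENXIO);
	}
	return (0);
}
#endif
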
 4388 int32_t
 4389 xpt_bus_deregister(path_id_t pathid)
 4390 {
 4391         struct cam_path bus_path;
 4392         cam_status status;
 4393 
 4394         GIANT_REQUIRED;
 4395 
 4396         status = xpt_compile_path(&bus_path, NULL, pathid,
 4397                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4398         if (status != CAM_REQ_CMP)
 4399                 return (status);
 4400 
 4401         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 4402         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 4403         
 4404         /* Release the reference count held while registered. */
 4405         xpt_release_bus(bus_path.bus);
 4406         xpt_release_path(&bus_path);
 4407 
 4408         return (CAM_REQ_CMP);
 4409 }
 4410 
 4411 static path_id_t
 4412 xptnextfreepathid(void)
 4413 {
 4414         struct cam_eb *bus;
 4415         path_id_t pathid;
 4416         const char *strval;
 4417 
 4418         pathid = 0;
 4419         bus = TAILQ_FIRST(&xpt_busses);
 4420 retry:
 4421         /* Find an unoccupied pathid */
 4422         while (bus != NULL
 4423             && bus->path_id <= pathid) {
 4424                 if (bus->path_id == pathid)
 4425                         pathid++;
 4426                 bus = TAILQ_NEXT(bus, links);
 4427         }
 4428 
 4429         /*
 4430          * Ensure that this pathid is not reserved for
 4431          * a bus that may be registered in the future.
 4432          */
 4433         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 4434                 ++pathid;
 4435                 /* Start the search over */
 4436                 goto retry;
 4437         }
 4438         return (pathid);
 4439 }
 4440 
 4441 static path_id_t
 4442 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 4443 {
 4444         path_id_t pathid;
 4445         int i, dunit, val;
 4446         char buf[32];
 4447         const char *dname;
 4448 
 4449         pathid = CAM_XPT_PATH_ID;
 4450         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 4451         i = 0;
 4452         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 4453                 if (strcmp(dname, "scbus")) {
 4454                         /* Avoid a bit of foot shooting. */
 4455                         continue;
 4456                 }
 4457                 if (dunit < 0)          /* unwired?! */
 4458                         continue;
 4459                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 4460                         if (sim_bus == val) {
 4461                                 pathid = dunit;
 4462                                 break;
 4463                         }
 4464                 } else if (sim_bus == 0) {
 4465                         /* Unspecified matches bus 0 */
 4466                         pathid = dunit;
 4467                         break;
 4468                 } else {
 4469                         printf("Ambiguous scbus configuration for %s%d "
 4470                                "bus %d, cannot wire down.  The kernel "
 4471                                "config entry for scbus%d should "
 4472                                "specify a controller bus.\n"
 4473                                "Scbus will be assigned dynamically.\n",
 4474                                sim_name, sim_unit, sim_bus, dunit);
 4475                         break;
 4476                 }
 4477         }
 4478 
 4479         if (pathid == CAM_XPT_PATH_ID)
 4480                 pathid = xptnextfreepathid();
 4481         return (pathid);
 4482 }
 4483 
 4484 void
 4485 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 4486 {
 4487         struct cam_eb *bus;
 4488         struct cam_et *target, *next_target;
 4489         struct cam_ed *device, *next_device;
 4490         int s;
 4491 
 4492         GIANT_REQUIRED;
 4493 
 4494         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
 4495 
 4496         /*
 4497          * Most async events come from a CAM interrupt context.  In
 4498          * a few cases, the error recovery code at the peripheral layer,
 4499          * which may run from our SWI or a process context, may signal
 4500          * deferred events with a call to xpt_async. Ensure async
 4501          * notifications are serialized by blocking cam interrupts.
 4502          */
 4503         s = splcam();
 4504 
 4505         bus = path->bus;
 4506 
 4507         if (async_code == AC_BUS_RESET) { 
 4508                 int s;
 4509 
 4510                 s = splclock();
 4511                 /* Update our notion of when the last reset occurred */
 4512                 microtime(&bus->last_reset);
 4513                 splx(s);
 4514         }
 4515 
 4516         for (target = TAILQ_FIRST(&bus->et_entries);
 4517              target != NULL;
 4518              target = next_target) {
 4519 
 4520                 next_target = TAILQ_NEXT(target, links);
 4521 
 4522                 if (path->target != target
 4523                  && path->target->target_id != CAM_TARGET_WILDCARD
 4524                  && target->target_id != CAM_TARGET_WILDCARD)
 4525                         continue;
 4526 
 4527                 if (async_code == AC_SENT_BDR) {
 4528                         int s;
 4529 
 4530                         /* Update our notion of when the last reset occurred */
 4531                         s = splclock();
 4532                         microtime(&path->target->last_reset);
 4533                         splx(s);
 4534                 }
 4535 
 4536                 for (device = TAILQ_FIRST(&target->ed_entries);
 4537                      device != NULL;
 4538                      device = next_device) {
 4539 
 4540                         next_device = TAILQ_NEXT(device, links);
 4541 
 4542                         if (path->device != device 
 4543                          && path->device->lun_id != CAM_LUN_WILDCARD
 4544                          && device->lun_id != CAM_LUN_WILDCARD)
 4545                                 continue;
 4546 
 4547                         xpt_dev_async(async_code, bus, target,
 4548                                       device, async_arg);
 4549 
 4550                         xpt_async_bcast(&device->asyncs, async_code,
 4551                                         path, async_arg);
 4552                 }
 4553         }
 4554         
 4555         /*
 4556          * If this wasn't a fully wildcarded async, tell all
 4557          * clients that want all async events.
 4558          */
 4559         if (bus != xpt_periph->path->bus)
 4560                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 4561                                 path, async_arg);
 4562         splx(s);
 4563 }
 4564 
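/*
 * Editor's sketch (not part of the original file): peripheral drivers
 * receive these notifications by registering a callback with an
 * XPT_SASYNC_CB CCB.  example_async_cb is a hypothetical ac_callback_t.
 */
#if 0
static void
example_register_async(struct cam_path *path)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_BUS_RESET;
	csa.callback = example_async_cb;
	csa.callback_arg = NULL;
	xpt_action((union ccb *)&csa);
}
#endif
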
 4565 static void
 4566 xpt_async_bcast(struct async_list *async_head,
 4567                 u_int32_t async_code,
 4568                 struct cam_path *path, void *async_arg)
 4569 {
 4570         struct async_node *cur_entry;
 4571 
 4572         cur_entry = SLIST_FIRST(async_head);
 4573         while (cur_entry != NULL) {
 4574                 struct async_node *next_entry;
 4575                 /*
 4576                  * Grab the next list entry before we call the current
 4577                  * entry's callback.  This is because the callback function
 4578                  * can delete its async callback entry.
 4579                  */
 4580                 next_entry = SLIST_NEXT(cur_entry, links);
 4581                 if ((cur_entry->event_enable & async_code) != 0)
 4582                         cur_entry->callback(cur_entry->callback_arg,
 4583                                             async_code, path,
 4584                                             async_arg);
 4585                 cur_entry = next_entry;
 4586         }
 4587 }
 4588 
 4589 /*
 4590  * Handle any per-device event notifications that require action by the XPT.
 4591  */
 4592 static void
 4593 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
 4594               struct cam_ed *device, void *async_arg)
 4595 {
 4596         cam_status status;
 4597         struct cam_path newpath;
 4598 
 4599         /*
 4600          * We only need to handle events for real devices.
 4601          */
 4602         if (target->target_id == CAM_TARGET_WILDCARD
 4603          || device->lun_id == CAM_LUN_WILDCARD)
 4604                 return;
 4605 
 4606         /*
 4607          * We need our own path with wildcards expanded to
 4608          * handle certain types of events.
 4609          */
 4610         if ((async_code == AC_SENT_BDR)
 4611          || (async_code == AC_BUS_RESET)
 4612          || (async_code == AC_INQ_CHANGED))
 4613                 status = xpt_compile_path(&newpath, NULL,
 4614                                           bus->path_id,
 4615                                           target->target_id,
 4616                                           device->lun_id);
 4617         else
 4618                 status = CAM_REQ_CMP_ERR;
 4619 
 4620         if (status == CAM_REQ_CMP) {
 4621 
 4622                 /*
 4623                  * Allow transfer negotiation to occur in a
 4624          * tag-free environment.
 4625                  */
 4626                 if (async_code == AC_SENT_BDR
 4627                  || async_code == AC_BUS_RESET)
 4628                         xpt_toggle_tags(&newpath);
 4629 
 4630                 if (async_code == AC_INQ_CHANGED) {
 4631                         /*
 4632                          * We've sent a start unit command, or
 4633                          * something similar to a device that
 4634                          * may have caused its inquiry data to
 4635                          * change. So we re-scan the device to
 4636                          * refresh the inquiry data for it.
 4637                          */
 4638                         xpt_scan_lun(newpath.periph, &newpath,
 4639                                      CAM_EXPECT_INQ_CHANGE, NULL);
 4640                 }
 4641                 xpt_release_path(&newpath);
 4642         } else if (async_code == AC_LOST_DEVICE) {
 4643                 device->flags |= CAM_DEV_UNCONFIGURED;
 4644         } else if (async_code == AC_TRANSFER_NEG) {
 4645                 struct ccb_trans_settings *settings;
 4646 
 4647                 settings = (struct ccb_trans_settings *)async_arg;
 4648                 xpt_set_transfer_settings(settings, device,
 4649                                           /*async_update*/TRUE);
 4650         }
 4651 }
 4652 
 4653 u_int32_t
 4654 xpt_freeze_devq(struct cam_path *path, u_int count)
 4655 {
 4656         int s;
 4657         struct ccb_hdr *ccbh;
 4658 
 4659         GIANT_REQUIRED;
 4660 
 4661         s = splcam();
 4662         path->device->qfrozen_cnt += count;
 4663 
 4664         /*
 4665          * Mark the last CCB in the queue as needing
 4666          * to be requeued if the driver hasn't
 4667          * changed its state yet.  This fixes a race
 4668          * where a ccb is just about to be queued to
 4669          * a controller driver when its interrupt routine
 4670          * freezes the queue.  To completely close the
 4671          * hole, controller drivers must check to see
 4672          * if a ccb's status is still CAM_REQ_INPROG
 4673          * under spl protection just before they queue
 4674          * the CCB.  See ahc_action/ahc_freeze_devq for
 4675          * an example.
 4676          */
 4677         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
 4678         if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4679                 ccbh->status = CAM_REQUEUE_REQ;
 4680         splx(s);
 4681         return (path->device->qfrozen_cnt);
 4682 }
 4683 
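/*
 * Editor's sketch (not part of the original file) of the re-check described
 * above: before committing a CCB to the hardware, a controller driver
 * verifies at splcam that the CCB is still CAM_REQ_INPROG and hands it back
 * if the queue was frozen underneath it (cf. ahc_action).
 */
#if 0
	s = splcam();
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		/* Frozen while we were setting up; return it for requeue. */
		splx(s);
		xpt_done(ccb);
		return;
	}
	/* ... commit the CCB to the controller ... */
	splx(s);
#endif
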
 4684 u_int32_t
 4685 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4686 {
 4687         GIANT_REQUIRED;
 4688 
 4689         sim->devq->send_queue.qfrozen_cnt += count;
 4690         if (sim->devq->active_dev != NULL) {
 4691                 struct ccb_hdr *ccbh;
 4692                 
 4693                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
 4694                                   ccb_hdr_tailq);
 4695                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4696                         ccbh->status = CAM_REQUEUE_REQ;
 4697         }
 4698         return (sim->devq->send_queue.qfrozen_cnt);
 4699 }
 4700 
 4701 static void
 4702 xpt_release_devq_timeout(void *arg)
 4703 {
 4704         struct cam_ed *device;
 4705 
 4706         device = (struct cam_ed *)arg;
 4707 
 4708         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
 4709 }
 4710 
 4711 void
 4712 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4713 {
 4714         GIANT_REQUIRED;
 4715 
 4716         xpt_release_devq_device(path->device, count, run_queue);
 4717 }
 4718 
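/*
 * Editor's sketch (not part of the original file): freeze and release calls
 * nest by count, so typical error recovery freezes the device queue, requeues
 * the failed CCB, and releases the queue once recovery completes.
 */
#if 0
	xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
	ccb->ccb_h.status = CAM_REQUEUE_REQ;
	xpt_done(ccb);
	/* ... later, once the device has recovered ... */
	xpt_release_devq(ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE);
#endif
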
 4719 static void
 4720 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
 4721 {
 4722         int     rundevq;
 4723         int     s0, s1;
 4724 
 4725         rundevq = 0;
 4726         s0 = splsoftcam();
 4727         s1 = splcam();
 4728         if (dev->qfrozen_cnt > 0) {
 4729 
 4730                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
 4731                 dev->qfrozen_cnt -= count;
 4732                 if (dev->qfrozen_cnt == 0) {
 4733 
 4734                         /*
 4735                          * No longer need to wait for a successful
 4736                          * command completion.
 4737                          */
 4738                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4739 
 4740                         /*
 4741                          * Remove any timeouts that might be scheduled
 4742                          * to release this queue.
 4743                          */
 4744                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4745                                 untimeout(xpt_release_devq_timeout, dev,
 4746                                           dev->c_handle);
 4747                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4748                         }
 4749 
 4750                         /*
 4751                          * Now that we are unfrozen schedule the
 4752                          * device so any pending transactions are
 4753                          * run.
 4754                          */
 4755                         if ((dev->ccbq.queue.entries > 0)
 4756                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4757                          && (run_queue != 0)) {
 4758                                 rundevq = 1;
 4759                         }
 4760                 }
 4761         }
 4762         splx(s1);
 4763         if (rundevq != 0)
 4764                 xpt_run_dev_sendq(dev->target->bus);
 4765         splx(s0);
 4766 }
 4767 
 4768 void
 4769 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4770 {
 4771         int     s;
 4772         struct  camq *sendq;
 4773 
 4774         GIANT_REQUIRED;
 4775 
 4776         sendq = &(sim->devq->send_queue);
 4777         s = splcam();
 4778         if (sendq->qfrozen_cnt > 0) {
 4779 
 4780                 sendq->qfrozen_cnt--;
 4781                 if (sendq->qfrozen_cnt == 0) {
 4782                         struct cam_eb *bus;
 4783 
 4784                         /*
 4785                          * If there is a timeout scheduled to release this
 4786                          * sim queue, remove it.  The queue frozen count is
 4787                          * already at 0.
 4788                          */
 4789                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4790                                 untimeout(xpt_release_simq_timeout, sim,
 4791                                           sim->c_handle);
 4792                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4793                         }
 4794                         bus = xpt_find_bus(sim->path_id);
 4795                         splx(s);
 4796 
 4797                         if (run_queue) {
 4798                                 /*
 4799                                  * Now that we are unfrozen run the send queue.
 4800                                  */
 4801                                 xpt_run_dev_sendq(bus);
 4802                         }
 4803                         xpt_release_bus(bus);
 4804                 } else
 4805                         splx(s);
 4806         } else
 4807                 splx(s);
 4808 }
 4809 
 4810 static void
 4811 xpt_release_simq_timeout(void *arg)
 4812 {
 4813         struct cam_sim *sim;
 4814 
 4815         sim = (struct cam_sim *)arg;
 4816         xpt_release_simq(sim, /* run_queue */ TRUE);
 4817 }
 4818 
 4819 void
 4820 xpt_done(union ccb *done_ccb)
 4821 {
 4822         int s;
 4823 
 4824         GIANT_REQUIRED;
 4825 
 4826         s = splcam();
 4827 
 4828         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4829         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4830                 /*
 4831                  * Queue up the request for handling by our SWI handler;
 4832                  * this covers any of the "non-immediate" type of ccbs.
 4833                  */
 4834                 switch (done_ccb->ccb_h.path->periph->type) {
 4835                 case CAM_PERIPH_BIO:
 4836                         TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
 4837                                           sim_links.tqe);
 4838                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4839                         swi_sched(cambio_ih, 0);
 4840                         break;
 4841                 case CAM_PERIPH_NET:
 4842                         TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
 4843                                           sim_links.tqe);
 4844                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4845                         swi_sched(camnet_ih, 0);
 4846                         break;
 4847                 }
 4848         }
 4849         splx(s);
 4850 }
 4851 
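/*
 * Editor's sketch (not part of the original file): a SIM completes I/O by
 * filling in the CCB status and handing it back through xpt_done();
 * CAM_DEV_QFRZN is OR'd in when the SIM has also frozen the device queue.
 */
#if 0
	if (error == 0)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR | CAM_DEV_QFRZN;
	}
	xpt_done(ccb);
#endif
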
 4852 union ccb *
 4853 xpt_alloc_ccb()
 4854 {
 4855         union ccb *new_ccb;
 4856 
 4857         GIANT_REQUIRED;
 4858 
 4859         new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
 4860         return (new_ccb);
 4861 }
 4862 
 4863 void
 4864 xpt_free_ccb(union ccb *free_ccb)
 4865 {
 4866         free(free_ccb, M_DEVBUF);
 4867 }
 4868 
 4869 
 4870 
 4871 /* Private XPT functions */
 4872 
 4873 /*
 4874  * Get a CAM control block for the caller. Charge the structure to the device
 4875  * referenced by the path.  If this device has no 'credits', then the
 4876  * device already has the maximum number of outstanding operations under way
 4877  * and we return NULL. If we don't have sufficient resources to allocate more
 4878  * ccbs, we also return NULL.
 4879  */
 4880 static union ccb *
 4881 xpt_get_ccb(struct cam_ed *device)
 4882 {
 4883         union ccb *new_ccb;
 4884         int s;
 4885 
 4886         s = splsoftcam();
 4887         if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
 4888                 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
 4889                 if (new_ccb == NULL) {
 4890                         splx(s);
 4891                         return (NULL);
 4892                 }
 4893                 callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 4894                 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
 4895                                   xpt_links.sle);
 4896                 xpt_ccb_count++;
 4897         }
 4898         cam_ccbq_take_opening(&device->ccbq);
 4899         SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
 4900         splx(s);
 4901         return (new_ccb);
 4902 }
 4903 
 4904 static void
 4905 xpt_release_bus(struct cam_eb *bus)
 4906 {
 4907         int s;
 4908 
 4909         s = splcam();
 4910         if ((--bus->refcount == 0)
 4911          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
 4912                 TAILQ_REMOVE(&xpt_busses, bus, links);
 4913                 bus_generation++;
 4914                 splx(s);
 4915                 free(bus, M_DEVBUF);
 4916         } else
 4917                 splx(s);
 4918 }
 4919 
 4920 static struct cam_et *
 4921 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4922 {
 4923         struct cam_et *target;
 4924 
 4925         target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
 4926         if (target != NULL) {
 4927                 struct cam_et *cur_target;
 4928 
 4929                 TAILQ_INIT(&target->ed_entries);
 4930                 target->bus = bus;
 4931                 target->target_id = target_id;
 4932                 target->refcount = 1;
 4933                 target->generation = 0;
 4934                 timevalclear(&target->last_reset);
 4935                 /*
 4936                  * Hold a reference to our parent bus so it
 4937                  * will not go away before we do.
 4938                  */
 4939                 bus->refcount++;
 4940 
 4941                 /* Insertion sort into our bus's target list */
 4942                 cur_target = TAILQ_FIRST(&bus->et_entries);
 4943                 while (cur_target != NULL && cur_target->target_id < target_id)
 4944                         cur_target = TAILQ_NEXT(cur_target, links);
 4945 
 4946                 if (cur_target != NULL) {
 4947                         TAILQ_INSERT_BEFORE(cur_target, target, links);
 4948                 } else {
 4949                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4950                 }
 4951                 bus->generation++;
 4952         }
 4953         return (target);
 4954 }
 4955 
 4956 static void
 4957 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
 4958 {
 4959         int s;
 4960 
 4961         s = splcam();
 4962         if ((--target->refcount == 0)
 4963          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
 4964                 TAILQ_REMOVE(&bus->et_entries, target, links);
 4965                 bus->generation++;
 4966                 splx(s);
 4967                 free(target, M_DEVBUF);
 4968                 xpt_release_bus(bus);
 4969         } else
 4970                 splx(s);
 4971 }
 4972 
 4973 static struct cam_ed *
 4974 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4975 {
 4976 #ifdef CAM_NEW_TRAN_CODE
 4977         struct     cam_path path;
 4978 #endif /* CAM_NEW_TRAN_CODE */
 4979         struct     cam_ed *device;
 4980         struct     cam_devq *devq;
 4981         cam_status status;
 4982 
 4983         /* Make space for us in the device queue on our bus */
 4984         devq = bus->sim->devq;
 4985         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 4986 
 4987         if (status != CAM_REQ_CMP) {
 4988                 device = NULL;
 4989         } else {
 4990                 device = (struct cam_ed *)malloc(sizeof(*device),
 4991                                                  M_DEVBUF, M_NOWAIT);
 4992         }
 4993 
 4994         if (device != NULL) {
 4995                 struct cam_ed *cur_device;
 4996 
 4997                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 4998                 device->alloc_ccb_entry.device = device;
 4999                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
 5000                 device->send_ccb_entry.device = device;
 5001                 device->target = target;
 5002                 device->lun_id = lun_id;
 5003                 /* Initialize our queues */
 5004                 if (camq_init(&device->drvq, 0) != 0) {
 5005                         free(device, M_DEVBUF);
 5006                         return (NULL);
 5007                 }
 5008                 if (cam_ccbq_init(&device->ccbq,
 5009                                   bus->sim->max_dev_openings) != 0) {
 5010                         camq_fini(&device->drvq);
 5011                         free(device, M_DEVBUF);
 5012                         return (NULL);
 5013                 }
 5014                 SLIST_INIT(&device->asyncs);
 5015                 SLIST_INIT(&device->periphs);
 5016                 device->generation = 0;
 5017                 device->owner = NULL;
 5018                 /*
 5019                  * Take the default quirk entry until we have inquiry
 5020                  * data and can determine a better quirk to use.
 5021                  */
 5022                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
 5023                 bzero(&device->inq_data, sizeof(device->inq_data));
 5024                 device->inq_flags = 0;
 5025                 device->queue_flags = 0;
 5026                 device->serial_num = NULL;
 5027                 device->serial_num_len = 0;
 5028                 device->qfrozen_cnt = 0;
 5029                 device->flags = CAM_DEV_UNCONFIGURED;
 5030                 device->tag_delay_count = 0;
 5031                 device->refcount = 1;
 5032                 callout_handle_init(&device->c_handle);
 5033 
 5034                 /*
 5035                  * Hold a reference to our parent target so it
 5036                  * will not go away before we do.
 5037                  */
 5038                 target->refcount++;
 5039 
 5040                 /*
 5041                  * XXX should be limited by number of CCBs this bus can
 5042                  * do.
 5043                  */
 5044                 xpt_max_ccbs += device->ccbq.devq_openings;
 5045                 /* Insertion sort into our target's device list */
 5046                 cur_device = TAILQ_FIRST(&target->ed_entries);
 5047                 while (cur_device != NULL && cur_device->lun_id < lun_id)
 5048                         cur_device = TAILQ_NEXT(cur_device, links);
 5049                 if (cur_device != NULL) {
 5050                         TAILQ_INSERT_BEFORE(cur_device, device, links);
 5051                 } else {
 5052                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 5053                 }
 5054                 target->generation++;
 5055 #ifdef CAM_NEW_TRAN_CODE
 5056                 if (lun_id != CAM_LUN_WILDCARD) {
 5057                         xpt_compile_path(&path,
 5058                                          NULL,
 5059                                          bus->path_id,
 5060                                          target->target_id,
 5061                                          lun_id);
 5062                         xpt_devise_transport(&path);
 5063                         xpt_release_path(&path);
 5064                 }
 5065 #endif /* CAM_NEW_TRAN_CODE */
 5066         }
 5067         return (device);
 5068 }
 5069 
 5070 static void
 5071 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
 5072                    struct cam_ed *device)
 5073 {
 5074         int s;
 5075 
 5076         s = splcam();
 5077         if ((--device->refcount == 0)
 5078          && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
 5079                 struct cam_devq *devq;
 5080 
 5081                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 5082                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 5083                         panic("Removing device while still queued for ccbs");
 5084 
 5085                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 5086                                 untimeout(xpt_release_devq_timeout, device,
 5087                                           device->c_handle);
 5088 
 5089                 TAILQ_REMOVE(&target->ed_entries, device,links);
 5090                 target->generation++;
 5091                 xpt_max_ccbs -= device->ccbq.devq_openings;
 5092                 /* Release our slot in the devq */
 5093                 devq = bus->sim->devq;
 5094                 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 5095                 splx(s);
 5096                 free(device, M_DEVBUF);
 5097                 xpt_release_target(bus, target);
 5098         } else
 5099                 splx(s);
 5100 }
 5101 
 5102 static u_int32_t
 5103 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 5104 {
 5105         int     s;
 5106         int     diff;
 5107         int     result;
 5108         struct  cam_ed *dev;
 5109 
 5110         dev = path->device;
 5111         s = splsoftcam();
 5112 
 5113         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 5114         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 5115         if (result == CAM_REQ_CMP && (diff < 0)) {
 5116                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 5117         }
 5118         /* Adjust the global limit */
 5119         xpt_max_ccbs += diff;
 5120         splx(s);
 5121         return (result);
 5122 }
 5123 
 5124 static struct cam_eb *
 5125 xpt_find_bus(path_id_t path_id)
 5126 {
 5127         struct cam_eb *bus;
 5128 
 5129         for (bus = TAILQ_FIRST(&xpt_busses);
 5130              bus != NULL;
 5131              bus = TAILQ_NEXT(bus, links)) {
 5132                 if (bus->path_id == path_id) {
 5133                         bus->refcount++;
 5134                         break;
 5135                 }
 5136         }
 5137         return (bus);
 5138 }
 5139 
 5140 static struct cam_et *
 5141 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 5142 {
 5143         struct cam_et *target;
 5144 
 5145         for (target = TAILQ_FIRST(&bus->et_entries);
 5146              target != NULL;
 5147              target = TAILQ_NEXT(target, links)) {
 5148                 if (target->target_id == target_id) {
 5149                         target->refcount++;
 5150                         break;
 5151                 }
 5152         }
 5153         return (target);
 5154 }
 5155 
 5156 static struct cam_ed *
 5157 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 5158 {
 5159         struct cam_ed *device;
 5160 
 5161         for (device = TAILQ_FIRST(&target->ed_entries);
 5162              device != NULL;
 5163              device = TAILQ_NEXT(device, links)) {
 5164                 if (device->lun_id == lun_id) {
 5165                         device->refcount++;
 5166                         break;
 5167                 }
 5168         }
 5169         return (device);
 5170 }
 5171 
 5172 typedef struct {
 5173         union   ccb *request_ccb;
 5174         struct  ccb_pathinq *cpi;
 5175         int     pending_count;
 5176 } xpt_scan_bus_info;
 5177 
 5178 /*
 5179  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
 5180  * As the scan progresses, xpt_scan_bus is used as the
 5181  * callback on completion function.
 5182  */
 5183 static void
 5184 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
 5185 {
 5186         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5187                   ("xpt_scan_bus\n"));
 5188         switch (request_ccb->ccb_h.func_code) {
 5189         case XPT_SCAN_BUS:
 5190         {
 5191                 xpt_scan_bus_info *scan_info;
 5192                 union   ccb *work_ccb;
 5193                 struct  cam_path *path;
 5194                 u_int   i;
 5195                 u_int   max_target;
 5196                 u_int   initiator_id;
 5197 
 5198                 /* Find out the characteristics of the bus */
 5199                 work_ccb = xpt_alloc_ccb();
 5200                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
 5201                               request_ccb->ccb_h.pinfo.priority);
 5202                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 5203                 xpt_action(work_ccb);
 5204                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
 5205                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
 5206                         xpt_free_ccb(work_ccb);
 5207                         xpt_done(request_ccb);
 5208                         return;
 5209                 }
 5210 
 5211                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5212                         /*
 5213                          * Can't scan the bus on an adapter that
 5214                          * cannot perform the initiator role.
 5215                          */
 5216                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5217                         xpt_free_ccb(work_ccb);
 5218                         xpt_done(request_ccb);
 5219                         return;
 5220                 }
 5221 
 5222                 /* Save some state for use while we probe for devices */
 5223                 scan_info = (xpt_scan_bus_info *)
 5224                     malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
 5225                 scan_info->request_ccb = request_ccb;
 5226                 scan_info->cpi = &work_ccb->cpi;
 5227 
 5228                 /* Cache on our stack so we can work asynchronously */
 5229                 max_target = scan_info->cpi->max_target;
 5230                 initiator_id = scan_info->cpi->initiator_id;
 5231 
 5232                 /*
 5233                  * Don't count the initiator if the
 5234                  * initiator is addressable.
 5235                  */
 5236                 scan_info->pending_count = max_target + 1;
 5237                 if (initiator_id <= max_target)
 5238                         scan_info->pending_count--;
 5239 
 5240                 for (i = 0; i <= max_target; i++) {
 5241                         cam_status status;
 5242                         if (i == initiator_id)
 5243                                 continue;
 5244 
 5245                         status = xpt_create_path(&path, xpt_periph,
 5246                                                  request_ccb->ccb_h.path_id,
 5247                                                  i, 0);
 5248                         if (status != CAM_REQ_CMP) {
 5249                                 printf("xpt_scan_bus: xpt_create_path failed"
 5250                                        " with status %#x, bus scan halted\n",
 5251                                        status);
 5252                                 break;
 5253                         }
 5254                         work_ccb = xpt_alloc_ccb();
 5255                         xpt_setup_ccb(&work_ccb->ccb_h, path,
 5256                                       request_ccb->ccb_h.pinfo.priority);
 5257                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5258                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5259                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5260                         work_ccb->crcn.flags = request_ccb->crcn.flags;
 5261                         xpt_action(work_ccb);
 5262                 }
 5263                 break;
 5264         }
 5265         case XPT_SCAN_LUN:
 5266         {
 5267                 xpt_scan_bus_info *scan_info;
 5268                 path_id_t path_id;
 5269                 target_id_t target_id;
 5270                 lun_id_t lun_id;
 5271 
 5272                 /* Reuse the same CCB to query if a device was really found */
 5273                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
 5274                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
 5275                               request_ccb->ccb_h.pinfo.priority);
 5276                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 5277 
 5278                 path_id = request_ccb->ccb_h.path_id;
 5279                 target_id = request_ccb->ccb_h.target_id;
 5280                 lun_id = request_ccb->ccb_h.target_lun;
 5281                 xpt_action(request_ccb);
 5282 
 5283                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
 5284                         struct cam_ed *device;
 5285                         struct cam_et *target;
 5286                         int s, phl;
 5287 
 5288                         /*
 5289                          * If we already probed lun 0 successfully, or
 5290                          * we have additional configured luns on this
 5291                          * target that might have "gone away", go on to
 5292                          * the next lun.
 5293                          */
 5294                         target = request_ccb->ccb_h.path->target;
 5295                         /*
 5296                          * We may touch devices that we don't
 5297                          * hold references to, so ensure they
 5298                          * don't disappear out from under us.
 5299                          * The target above is referenced by the
 5300                          * path in the request ccb.
 5301                          */
 5302                         phl = 0;
 5303                         s = splcam();
 5304                         device = TAILQ_FIRST(&target->ed_entries);
 5305                         if (device != NULL) {
 5306                                 phl = device->quirk->quirks & CAM_QUIRK_HILUNS;
 5307                                 if (device->lun_id == 0)
 5308                                         device = TAILQ_NEXT(device, links);
 5309                         }
 5310                         splx(s);
 5311                         if ((lun_id != 0) || (device != NULL)) {
 5312                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
 5313                                         lun_id++;
 5314                         }
 5315                 } else {
 5316                         struct cam_ed *device;
 5317                         
 5318                         device = request_ccb->ccb_h.path->device;
 5319 
 5320                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
 5321                                 /* Try the next lun */
 5322                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
 5323                                     (device->quirk->quirks & CAM_QUIRK_HILUNS))
 5324                                         lun_id++;
 5325                         }
 5326                 }
 5327 
 5328                 xpt_free_path(request_ccb->ccb_h.path);
 5329 
 5330                 /*
                       * If the lun number did not advance, or it went past
                       * the adapter's maximum lun, we are done with this
                       * target.
                       */
 5331                 if ((lun_id == request_ccb->ccb_h.target_lun)
 5332                  || lun_id > scan_info->cpi->max_lun) {
 5333                         /* We're done */
 5334 
 5335                         xpt_free_ccb(request_ccb);
 5336                         scan_info->pending_count--;
 5337                         if (scan_info->pending_count == 0) {
 5338                                 xpt_free_ccb((union ccb *)scan_info->cpi);
 5339                                 request_ccb = scan_info->request_ccb;
 5340                                 free(scan_info, M_TEMP);
 5341                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
 5342                                 xpt_done(request_ccb);
 5343                         }
 5344                 } else {
 5345                         /* Try the next device */
 5346                         struct cam_path *path;
 5347                         cam_status status;
 5348 
 5349                         path = request_ccb->ccb_h.path;
 5350                         status = xpt_create_path(&path, xpt_periph,
 5351                                                  path_id, target_id, lun_id);
 5352                         if (status != CAM_REQ_CMP) {
 5353                                 printf("xpt_scan_bus: xpt_create_path failed "
 5354                                        "with status %#x, halting LUN scan\n",
 5355                                        status);
 5356                                 xpt_free_ccb(request_ccb);
 5357                                 scan_info->pending_count--;
 5358                                 if (scan_info->pending_count == 0) {
 5359                                         xpt_free_ccb(
 5360                                                 (union ccb *)scan_info->cpi);
 5361                                         request_ccb = scan_info->request_ccb;
 5362                                         free(scan_info, M_TEMP);
 5363                                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5364                                         xpt_done(request_ccb);
 5365                                         break;
 5366                                 }
 5367                         }
 5368                         xpt_setup_ccb(&request_ccb->ccb_h, path,
 5369                                       request_ccb->ccb_h.pinfo.priority);
 5370                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5371                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5372                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5373                         request_ccb->crcn.flags =
 5374                                 scan_info->request_ccb->crcn.flags;
 5375                         xpt_action(request_ccb);
 5376                 }
 5377                 break;
 5378         }
 5379         default:
 5380                 break;
 5381         }
 5382 }
 5383 
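      /*
       * A device probe runs as a small state machine: probestart()
       * issues the command for the current action and probedone()
       * advances to the next one.  The usual order is an optional TUR
       * (for previously configured lun-0 devices), a short INQUIRY
       * (widened to a FULL_INQUIRY if the device reports more data),
       * a MODE SENSE of the control page for devices that support
       * command queuing, a serial number VPD INQUIRY, and a final TUR
       * to trigger transfer negotiation.
       */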
 5384 typedef enum {
 5385         PROBE_TUR,
 5386         PROBE_INQUIRY,
 5387         PROBE_FULL_INQUIRY,
 5388         PROBE_MODE_SENSE,
 5389         PROBE_SERIAL_NUM,
 5390         PROBE_TUR_FOR_NEGOTIATION
 5391 } probe_action;
 5392 
 5393 typedef enum {
 5394         PROBE_INQUIRY_CKSUM     = 0x01,
 5395         PROBE_SERIAL_CKSUM      = 0x02,
 5396         PROBE_NO_ANNOUNCE       = 0x04
 5397 } probe_flags;
 5398 
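      /*
       * Per-probe state.  Multiple scan requests for the same device are
       * queued on request_ccbs and serviced one probe sequence at a time;
       * the MD5 digest of the previous inquiry and serial number data lets
       * probedone() decide whether the device behind this path has changed.
       */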
 5399 typedef struct {
 5400         TAILQ_HEAD(, ccb_hdr) request_ccbs;
 5401         probe_action    action;
 5402         union ccb       saved_ccb;
 5403         probe_flags     flags;
 5404         MD5_CTX         context;
 5405         u_int8_t        digest[16];
 5406 } probe_softc;
 5407 
 5408 static void
 5409 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
 5410              cam_flags flags, union ccb *request_ccb)
 5411 {
 5412         struct ccb_pathinq cpi;
 5413         cam_status status;
 5414         struct cam_path *new_path;
 5415         struct cam_periph *old_periph;
 5416         int s;
 5417         
 5418         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5419                   ("xpt_scan_lun\n"));
 5420         
 5421         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 5422         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5423         xpt_action((union ccb *)&cpi);
 5424 
 5425         if (cpi.ccb_h.status != CAM_REQ_CMP) {
 5426                 if (request_ccb != NULL) {
 5427                         request_ccb->ccb_h.status = cpi.ccb_h.status;
 5428                         xpt_done(request_ccb);
 5429                 }
 5430                 return;
 5431         }
 5432 
 5433         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5434                 /*
 5435                  * Can't scan a lun on an adapter that
 5436                  * cannot perform the initiator role.
 5437                  */
 5438                 if (request_ccb != NULL) {
 5439                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5440                         xpt_done(request_ccb);
 5441                 }
 5442                 return;
 5443         }
 5444 
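              /*
               * The caller did not supply a CCB, so this scan was generated
               * internally.  Allocate a CCB and path of our own and have
               * xptscandone() dispose of them when the scan completes.
               */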
 5445         if (request_ccb == NULL) {
 5446                 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
 5447                 if (request_ccb == NULL) {
 5448                         xpt_print_path(path);
 5449                         printf("xpt_scan_lun: can't allocate CCB, can't "
 5450                                "continue\n");
 5451                         return;
 5452                 }
 5453                 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
 5454                 if (new_path == NULL) {
 5455                         xpt_print_path(path);
 5456                         printf("xpt_scan_lun: can't allocate path, can't "
 5457                                "continue\n");
 5458                         free(request_ccb, M_TEMP);
 5459                         return;
 5460                 }
 5461                 status = xpt_compile_path(new_path, xpt_periph,
 5462                                           path->bus->path_id,
 5463                                           path->target->target_id,
 5464                                           path->device->lun_id);
 5465 
 5466                 if (status != CAM_REQ_CMP) {
 5467                         xpt_print_path(path);
 5468                         printf("xpt_scan_lun: can't compile path, can't "
 5469                                "continue\n");
 5470                         free(request_ccb, M_TEMP);
 5471                         free(new_path, M_TEMP);
 5472                         return;
 5473                 }
 5474                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
 5475                 request_ccb->ccb_h.cbfcnp = xptscandone;
 5476                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5477                 request_ccb->crcn.flags = flags;
 5478         }
 5479 
 5480         s = splsoftcam();
 5481         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
 5482                 probe_softc *softc;
 5483 
 5484                 softc = (probe_softc *)old_periph->softc;
 5485                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5486                                   periph_links.tqe);
 5487         } else {
 5488                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
 5489                                           probestart, "probe",
 5490                                           CAM_PERIPH_BIO,
 5491                                           request_ccb->ccb_h.path, NULL, 0,
 5492                                           request_ccb);
 5493 
 5494                 if (status != CAM_REQ_CMP) {
 5495                         xpt_print_path(path);
 5496                         printf("xpt_scan_lun: cam_periph_alloc returned an "
 5497                                "error, can't continue probe\n");
 5498                         request_ccb->ccb_h.status = status;
 5499                         xpt_done(request_ccb);
 5500                 }
 5501         }
 5502         splx(s);
 5503 }
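
      /*
       * Illustrative call (not taken from this file): a rescan path
       * might hand a single lun to the probe machinery with
       *
       *      xpt_scan_lun(xpt_periph, path, 0, NULL);
       *
       * relying on the NULL request_ccb branch above to allocate the
       * CCB and path and to complete silently through xptscandone().
       */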
 5504 
 5505 static void
 5506 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
 5507 {
 5508         xpt_release_path(done_ccb->ccb_h.path);
 5509         free(done_ccb->ccb_h.path, M_TEMP);
 5510         free(done_ccb, M_TEMP);
 5511 }
 5512 
 5513 static cam_status
 5514 proberegister(struct cam_periph *periph, void *arg)
 5515 {
 5516         union ccb *request_ccb; /* CCB representing the probe request */
 5517         probe_softc *softc;
 5518 
 5519         request_ccb = (union ccb *)arg;
 5520         if (periph == NULL) {
 5521                 printf("proberegister: periph was NULL!!\n");
 5522                 return(CAM_REQ_CMP_ERR);
 5523         }
 5524 
 5525         if (request_ccb == NULL) {
 5526                 printf("proberegister: no probe CCB, "
 5527                        "can't register device\n");
 5528                 return(CAM_REQ_CMP_ERR);
 5529         }
 5530 
 5531         softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
 5532 
 5533         if (softc == NULL) {
 5534                 printf("proberegister: Unable to probe new device: "
 5535                        "cannot allocate softc\n");
 5536                 return(CAM_REQ_CMP_ERR);
 5537         }
 5538         TAILQ_INIT(&softc->request_ccbs);
 5539         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5540                           periph_links.tqe);
 5541         softc->flags = 0;
 5542         periph->softc = softc;
 5543         cam_periph_acquire(periph);
 5544         /*
 5545          * Ensure we've waited at least a bus settle
 5546          * delay before attempting to probe the device.
 5547          * For HBAs that don't do bus resets, this won't make a difference.
 5548          */
 5549         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
 5550                                       scsi_delay);
 5551         probeschedule(periph);
 5552         return(CAM_REQ_CMP);
 5553 }
 5554 
 5555 static void
 5556 probeschedule(struct cam_periph *periph)
 5557 {
 5558         struct ccb_pathinq cpi;
 5559         union ccb *ccb;
 5560         probe_softc *softc;
 5561 
 5562         softc = (probe_softc *)periph->softc;
 5563         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 5564 
 5565         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
 5566         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5567         xpt_action((union ccb *)&cpi);
 5568 
 5569         /*
 5570          * If a device has gone away and another device, or the same one,
 5571          * is back in the same place, it should have a unit attention
 5572          * condition pending.  It will not report the unit attention in
 5573          * response to an inquiry, which may leave invalid transfer
 5574          * negotiations in effect.  The TUR will reveal the unit attention
 5575          * condition.  Only send the TUR for lun 0, since some devices 
 5576          * will get confused by commands other than inquiry to non-existent
 5577          * luns.  If you think a device has gone away, start your scan
 5578          * from lun 0.  This will ensure that any bogus transfer settings are
 5579          * invalidated.
 5580          *
 5581          * If we haven't seen the device before and the controller supports
 5582          * some kind of transfer negotiation, negotiate with the first
 5583          * sent command if no bus reset was performed at startup.  This
 5584          * ensures that the device is not confused by transfer negotiation
 5585          * settings left over by loader or BIOS action.
 5586          */
 5587         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 5588          && (ccb->ccb_h.target_lun == 0)) {
 5589                 softc->action = PROBE_TUR;
 5590         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
 5591               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
 5592                 proberequestdefaultnegotiation(periph);
 5593                 softc->action = PROBE_INQUIRY;
 5594         } else {
 5595                 softc->action = PROBE_INQUIRY;
 5596         }
 5597 
 5598         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
 5599                 softc->flags |= PROBE_NO_ANNOUNCE;
 5600         else
 5601                 softc->flags &= ~PROBE_NO_ANNOUNCE;
 5602 
 5603         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
 5604 }
 5605 
 5606 static void
 5607 probestart(struct cam_periph *periph, union ccb *start_ccb)
 5608 {
 5609         /* Probe the device that our peripheral driver points to */
 5610         struct ccb_scsiio *csio;
 5611         probe_softc *softc;
 5612 
 5613         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
 5614 
 5615         softc = (probe_softc *)periph->softc;
 5616         csio = &start_ccb->csio;
 5617 
 5618         switch (softc->action) {
 5619         case PROBE_TUR:
 5620         case PROBE_TUR_FOR_NEGOTIATION:
 5621         {
 5622                 scsi_test_unit_ready(csio,
 5623                                      /*retries*/4,
 5624                                      probedone,
 5625                                      MSG_SIMPLE_Q_TAG,
 5626                                      SSD_FULL_SIZE,
 5627                                      /*timeout*/60000);
 5628                 break;
 5629         }
 5630         case PROBE_INQUIRY:
 5631         case PROBE_FULL_INQUIRY:
 5632         {
 5633                 u_int inquiry_len;
 5634                 struct scsi_inquiry_data *inq_buf;
 5635 
 5636                 inq_buf = &periph->path->device->inq_data;
 5637                 /*
 5638                  * If the device is currently configured, we calculate an
 5639                  * MD5 checksum of the inquiry data, and if the serial number
 5640                  * length is greater than 0, add the serial number data
 5641                  * into the checksum as well.  Once the inquiry and the
 5642                  * serial number check finish, we attempt to figure out
 5643                  * whether we still have the same device.
 5644                  */
 5645                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
 5646                         
 5647                         MD5Init(&softc->context);
 5648                         MD5Update(&softc->context, (unsigned char *)inq_buf,
 5649                                   sizeof(struct scsi_inquiry_data));
 5650                         softc->flags |= PROBE_INQUIRY_CKSUM;
 5651                         if (periph->path->device->serial_num_len > 0) {
 5652                                 MD5Update(&softc->context,
 5653                                           periph->path->device->serial_num,
 5654                                           periph->path->device->serial_num_len);
 5655                                 softc->flags |= PROBE_SERIAL_CKSUM;
 5656                         }
 5657                         MD5Final(softc->digest, &softc->context);
 5658                 } 
 5659 
 5660                 if (softc->action == PROBE_INQUIRY)
 5661                         inquiry_len = SHORT_INQUIRY_LENGTH;
 5662                 else
 5663                         inquiry_len = inq_buf->additional_length + 4;
 5664         
 5665                 scsi_inquiry(csio,
 5666                              /*retries*/4,
 5667                              probedone,
 5668                              MSG_SIMPLE_Q_TAG,
 5669                              (u_int8_t *)inq_buf,
 5670                              inquiry_len,
 5671                              /*evpd*/FALSE,
 5672                              /*page_code*/0,
 5673                              SSD_MIN_SIZE,
 5674                              /*timeout*/60 * 1000);
 5675                 break;
 5676         }
 5677         case PROBE_MODE_SENSE:
 5678         {
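                      /*
                       * Fetch the current values of the control mode page so
                       * that probedone() can record the device's queue_flags.
                       */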
 5679                 void  *mode_buf;
 5680                 int    mode_buf_len;
 5681 
 5682                 mode_buf_len = sizeof(struct scsi_mode_header_6)
 5683                              + sizeof(struct scsi_mode_blk_desc)
 5684                              + sizeof(struct scsi_control_page);
 5685                 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
 5686                 if (mode_buf != NULL) {
 5687                         scsi_mode_sense(csio,
 5688                                         /*retries*/4,
 5689                                         probedone,
 5690                                         MSG_SIMPLE_Q_TAG,
 5691                                         /*dbd*/FALSE,
 5692                                         SMS_PAGE_CTRL_CURRENT,
 5693                                         SMS_CONTROL_MODE_PAGE,
 5694                                         mode_buf,
 5695                                         mode_buf_len,
 5696                                         SSD_FULL_SIZE,
 5697                                         /*timeout*/60000);
 5698                         break;
 5699                 }
 5700                 xpt_print_path(periph->path);
 5701                 printf("Unable to mode sense control page - malloc failure\n");
 5702                 softc->action = PROBE_SERIAL_NUM;
 5703         }
 5704         /* FALLTHROUGH */
 5705         case PROBE_SERIAL_NUM:
 5706         {
 5707                 struct scsi_vpd_unit_serial_number *serial_buf;
 5708                 struct cam_ed* device;
 5709 
 5710                 serial_buf = NULL;
 5711                 device = periph->path->device;
 5712                 device->serial_num = NULL;
 5713                 device->serial_num_len = 0;
 5714 
 5715                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
 5716                         serial_buf = (struct scsi_vpd_unit_serial_number *)
 5717                                 malloc(sizeof(*serial_buf), M_TEMP,
 5718                                         M_NOWAIT | M_ZERO);
 5719 
 5720                 if (serial_buf != NULL) {
 5721                         scsi_inquiry(csio,
 5722                                      /*retries*/4,
 5723                                      probedone,
 5724                                      MSG_SIMPLE_Q_TAG,
 5725                                      (u_int8_t *)serial_buf,
 5726                                      sizeof(*serial_buf),
 5727                                      /*evpd*/TRUE,
 5728                                      SVPD_UNIT_SERIAL_NUMBER,
 5729                                      SSD_MIN_SIZE,
 5730                                      /*timeout*/60 * 1000);
 5731                         break;
 5732                 }
 5733                 /*
 5734                  * We'll have to do without; let our probedone
 5735                  * routine finish up for us.
 5736                  */
 5737                 start_ccb->csio.data_ptr = NULL;
 5738                 probedone(periph, start_ccb);
 5739                 return;
 5740         }
 5741         }
 5742         xpt_action(start_ccb);
 5743 }
 5744 
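      /*
       * Fetch the user (default) transfer settings for this path and
       * install them as the current settings, giving the SIM a clean
       * starting point for renegotiation.
       */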
 5745 static void
 5746 proberequestdefaultnegotiation(struct cam_periph *periph)
 5747 {
 5748         struct ccb_trans_settings cts;
 5749 
 5750         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
 5751         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 5752 #ifdef CAM_NEW_TRAN_CODE
 5753         cts.type = CTS_TYPE_USER_SETTINGS;
 5754 #else /* CAM_NEW_TRAN_CODE */
 5755         cts.flags = CCB_TRANS_USER_SETTINGS;
 5756 #endif /* CAM_NEW_TRAN_CODE */
 5757         xpt_action((union ccb *)&cts);
 5758         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 5759 #ifdef CAM_NEW_TRAN_CODE
 5760         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 5761 #else /* CAM_NEW_TRAN_CODE */
 5762         cts.flags &= ~CCB_TRANS_USER_SETTINGS;
 5763         cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
 5764 #endif /* CAM_NEW_TRAN_CODE */
 5765         xpt_action((union ccb *)&cts);
 5766 }
 5767 
 5768 static void
 5769 probedone(struct cam_periph *periph, union ccb *done_ccb)
 5770 {
 5771         probe_softc *softc;
 5772         struct cam_path *path;
 5773         u_int32_t  priority;
 5774 
 5775         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
 5776 
 5777         softc = (probe_softc *)periph->softc;
 5778         path = done_ccb->ccb_h.path;
 5779         priority = done_ccb->ccb_h.pinfo.priority;
 5780 
 5781         switch (softc->action) {
 5782         case PROBE_TUR:
 5783         {
 5784                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 5785 
 5786                         if (cam_periph_error(done_ccb, 0,
 5787                                              SF_NO_PRINT, NULL) == ERESTART)
 5788                                 return;
 5789                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 5790                                 /* Don't wedge the queue */
 5791                                 xpt_release_devq(done_ccb->ccb_h.path,
 5792                                                  /*count*/1,
 5793                                                  /*run_queue*/TRUE);
 5794                 }
 5795                 softc->action = PROBE_INQUIRY;
 5796                 xpt_release_ccb(done_ccb);
 5797                 xpt_schedule(periph, priority);
 5798                 return;
 5799         }
 5800         case PROBE_INQUIRY:
 5801         case PROBE_FULL_INQUIRY:
 5802         {
 5803                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 5804                         struct scsi_inquiry_data *inq_buf;
 5805                         u_int8_t periph_qual;
 5806 
 5807                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
 5808                         inq_buf = &path->device->inq_data;
 5809 
 5810                         periph_qual = SID_QUAL(inq_buf);
 5811                         
 5812                         switch(periph_qual) {
 5813                         case SID_QUAL_LU_CONNECTED:
 5814                         {
 5815                                 u_int8_t alen;
 5816 
 5817                                 /*
 5818                                  * We conservatively request only
 5819                                  * SHORT_INQUIRY_LENGTH bytes of inquiry
 5820                                  * information during our first try
 5821                                  * at sending an INQUIRY. If the device
 5822                                  * has more information to give,
 5823                                  * perform a second request specifying
 5824                                  * the amount of information the device
 5825                                  * is willing to give.
 5826                                  */
 5827                                 alen = inq_buf->additional_length;
 5828                                 if (softc->action == PROBE_INQUIRY
 5829                                  && alen > (SHORT_INQUIRY_LENGTH - 4)) {
 5830                                         softc->action = PROBE_FULL_INQUIRY;
 5831                                         xpt_release_ccb(done_ccb);
 5832                                         xpt_schedule(periph, priority);
 5833                                         return;
 5834                                 }
 5835 
 5836                                 xpt_find_quirk(path->device);
 5837 
 5838 #ifdef CAM_NEW_TRAN_CODE
 5839                                 xpt_devise_transport(path);
 5840 #endif /* CAM_NEW_TRAN_CODE */
 5841                                 if ((inq_buf->flags & SID_CmdQue) != 0)
 5842                                         softc->action = PROBE_MODE_SENSE;
 5843                                 else
 5844                                         softc->action = PROBE_SERIAL_NUM;
 5845 
 5846                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
 5847 
 5848                                 xpt_release_ccb(done_ccb);
 5849                                 xpt_schedule(periph, priority);
 5850                                 return;
 5851                         }
 5852                         default:
 5853                                 break;
 5854                         }
 5855                 } else if (cam_periph_error(done_ccb, 0,
 5856                                             done_ccb->ccb_h.target_lun > 0
 5857                                             ? SF_RETRY_UA|SF_QUIET_IR
 5858                                             : SF_RETRY_UA,
 5859                                             &softc->saved_ccb) == ERESTART) {
 5860                         return;
 5861                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 5862                         /* Don't wedge the queue */
 5863                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 5864                                          /*run_queue*/TRUE);
 5865                 }
 5866                 /*
 5867                  * If we get to this point, we got an error status back
 5868                  * from the inquiry and the error status doesn't require
 5869                  * automatically retrying the command.  Therefore, the
 5870                  * inquiry failed.  If we had inquiry information before
 5871                  * for this device, but this latest inquiry command failed,
 5872                  * the device has probably gone away.  If this device isn't
 5873                  * already marked unconfigured, notify the peripheral
 5874                  * drivers that this device is no more.
 5875                  */
 5876                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 5877                         /* Send the async notification. */
 5878                         xpt_async(AC_LOST_DEVICE, path, NULL);
 5879 
 5880                 xpt_release_ccb(done_ccb);
 5881                 break;
 5882         }
 5883         case PROBE_MODE_SENSE:
 5884         {
 5885                 struct ccb_scsiio *csio;
 5886                 struct scsi_mode_header_6 *mode_hdr;
 5887 
 5888                 csio = &done_ccb->csio;
 5889                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
 5890                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 5891                         struct scsi_control_page *page;
 5892                         u_int8_t *offset;
 5893 
 5894                         offset = ((u_int8_t *)&mode_hdr[1])
 5895                             + mode_hdr->blk_desc_len;
 5896                         page = (struct scsi_control_page *)offset;
 5897                         path->device->queue_flags = page->queue_flags;
 5898                 } else if (cam_periph_error(done_ccb, 0,
 5899                                             SF_RETRY_UA|SF_NO_PRINT,
 5900                                             &softc->saved_ccb) == ERESTART) {
 5901                         return;
 5902                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 5903                         /* Don't wedge the queue */
 5904                         xpt_release_devq(done_ccb->ccb_h.path,
 5905                                          /*count*/1, /*run_queue*/TRUE);
 5906                 }
 5907                 xpt_release_ccb(done_ccb);
 5908                 free(mode_hdr, M_TEMP);
 5909                 softc->action = PROBE_SERIAL_NUM;
 5910                 xpt_schedule(periph, priority);
 5911                 return;
 5912         }
 5913         case PROBE_SERIAL_NUM:
 5914         {
 5915                 struct ccb_scsiio *csio;
 5916                 struct scsi_vpd_unit_serial_number *serial_buf;
 5917                 u_int32_t  priority;
 5918                 int changed;
 5919                 int have_serialnum;
 5920 
 5921                 changed = 1;
 5922                 have_serialnum = 0;
 5923                 csio = &done_ccb->csio;
 5924                 priority = done_ccb->ccb_h.pinfo.priority;
 5925                 serial_buf =
 5926                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
 5927 
 5928                 /* Clean up from previous instance of this device */
 5929                 if (path->device->serial_num != NULL) {
 5930                         free(path->device->serial_num, M_DEVBUF);
 5931                         path->device->serial_num = NULL;
 5932                         path->device->serial_num_len = 0;
 5933                 }
 5934 
 5935                 if (serial_buf == NULL) {
 5936                         /*
 5937                          * Don't process the command as it was never sent
 5938                          */
 5939                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
 5940                         && (serial_buf->length > 0)) {
 5941 
 5942                         have_serialnum = 1;
 5943                         path->device->serial_num =
 5944                                 (u_int8_t *)malloc((serial_buf->length + 1),
 5945                                                    M_DEVBUF, M_NOWAIT);
 5946                         if (path->device->serial_num != NULL) {
 5947                                 bcopy(serial_buf->serial_num,
 5948                                       path->device->serial_num,
 5949                                       serial_buf->length);
 5950                                 path->device->serial_num_len =
 5951                                     serial_buf->length;
 5952                                 path->device->serial_num[serial_buf->length]
 5953                                     = '\0';
 5954                         }
 5955                 } else if (cam_periph_error(done_ccb, 0,
 5956                                             SF_RETRY_UA|SF_NO_PRINT,
 5957                                             &softc->saved_ccb) == ERESTART) {
 5958                         return;
 5959                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 5960                         /* Don't wedge the queue */
 5961                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 5962                                          /*run_queue*/TRUE);
 5963                 }
 5964                 
 5965                 /*
 5966                  * Let's see if we have seen this device before.
 5967                  */
 5968                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
 5969                         MD5_CTX context;
 5970                         u_int8_t digest[16];
 5971 
 5972                         MD5Init(&context);
 5973                         
 5974                         MD5Update(&context,
 5975                                   (unsigned char *)&path->device->inq_data,
 5976                                   sizeof(struct scsi_inquiry_data));
 5977 
 5978                         if (have_serialnum)
 5979                                 MD5Update(&context, serial_buf->serial_num,
 5980                                           serial_buf->length);
 5981 
 5982                         MD5Final(digest, &context);
 5983                         if (bcmp(softc->digest, digest, 16) == 0)
 5984                                 changed = 0;
 5985 
 5986                         /*
 5987                          * XXX Do we need to do a TUR in order to ensure
 5988                          *     that the device really hasn't changed???
 5989                          */
 5990                         if ((changed != 0)
 5991                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
 5992                                 xpt_async(AC_LOST_DEVICE, path, NULL);
 5993                 }
 5994                 if (serial_buf != NULL)
 5995                         free(serial_buf, M_TEMP);
 5996 
 5997                 if (changed != 0) {
 5998                         /*
 5999                          * Now that we have all the necessary
 6000                          * information to safely perform transfer
 6001                          * negotiations... Controllers don't perform
 6002                          * any negotiation or tagged queuing until
 6003                          * after the first XPT_SET_TRAN_SETTINGS ccb is
 6004                          * received.  So, on a new device, just retrieve
 6005                          * the user settings, and set them as the current
 6006                          * settings to set the device up.
 6007                          */
 6008                         proberequestdefaultnegotiation(periph);
 6009                         xpt_release_ccb(done_ccb);
 6010 
 6011                         /*
 6012                          * Perform a TUR to allow the controller to
 6013                          * perform any necessary transfer negotiation.
 6014                          */
 6015                         softc->action = PROBE_TUR_FOR_NEGOTIATION;
 6016                         xpt_schedule(periph, priority);
 6017                         return;
 6018                 }
 6019                 xpt_release_ccb(done_ccb);
 6020                 break;
 6021         }
 6022         case PROBE_TUR_FOR_NEGOTIATION:
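                      /*
                       * The negotiation TUR has completed; the device is now
                       * fully probed.  Mark it configured and, unless we were
                       * asked for silence, announce it to interested
                       * peripheral drivers.
                       */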
 6023                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6024                         /* Don't wedge the queue */
 6025                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6026                                          /*run_queue*/TRUE);
 6027                 }
 6028 
 6029                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
 6030 
 6031                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
 6032                         /* Inform the XPT that a new device has been found */
 6033                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 6034                         xpt_action(done_ccb);
 6035 
 6036                         xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
 6037                 }
 6038                 xpt_release_ccb(done_ccb);
 6039                 break;
 6040         }
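              /*
               * Every path through the switch that falls out to here has
               * finished the probe sequence for the request at the head of
               * the queue.  Complete that request, then either start the
               * next queued probe or tear this probe periph down.
               */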
 6041         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 6042         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
 6043         done_ccb->ccb_h.status = CAM_REQ_CMP;
 6044         xpt_done(done_ccb);
 6045         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
 6046                 cam_periph_invalidate(periph);
 6047                 cam_periph_release(periph);
 6048         } else {
 6049                 probeschedule(periph);
 6050         }
 6051 }
 6052 
 6053 static void
 6054 probecleanup(struct cam_periph *periph)
 6055 {
 6056         free(periph->softc, M_TEMP);
 6057 }
 6058 
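      /*
       * Match the device's inquiry data against the static quirk table.
       * The table ends in a wildcard entry, so failing to match anything
       * at all indicates a corrupt table and is fatal.
       */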
 6059 static void
 6060 xpt_find_quirk(struct cam_ed *device)
 6061 {
 6062         caddr_t match;
 6063 
 6064         match = cam_quirkmatch((caddr_t)&device->inq_data,
 6065                                (caddr_t)xpt_quirk_table,
 6066                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
 6067                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
 6068 
 6069         if (match == NULL)
 6070                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
 6071 
 6072         device->quirk = (struct xpt_quirk_entry *)match;
 6073 }
 6074 
 6075 #ifdef CAM_NEW_TRAN_CODE
 6076 
 6077 static void
 6078 xpt_devise_transport(struct cam_path *path)
 6079 {
 6080         struct ccb_pathinq cpi;
 6081         struct ccb_trans_settings cts;
 6082         struct scsi_inquiry_data *inq_buf;
 6083 
 6084         /* Get transport information from the SIM */
 6085         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 6086         cpi.ccb_h.func_code = XPT_PATH_INQ;
 6087         xpt_action((union ccb *)&cpi);
 6088 
 6089         inq_buf = NULL;
 6090         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
 6091                 inq_buf = &path->device->inq_data;
 6092         path->device->protocol = PROTO_SCSI;
 6093         path->device->protocol_version =
 6094             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
 6095         path->device->transport = cpi.transport;
 6096         path->device->transport_version = cpi.transport_version;
 6097 
 6098         /*
 6099          * Any device not using SPI3 features should
 6100          * be considered SPI2 or lower.
 6101          */
 6102         if (inq_buf != NULL) {
 6103                 if (path->device->transport == XPORT_SPI
 6104                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
 6105                  && path->device->transport_version > 2)
 6106                         path->device->transport_version = 2;
 6107         } else {
 6108                 struct cam_ed* otherdev;
 6109 
 6110                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
 6111                      otherdev != NULL;
 6112                      otherdev = TAILQ_NEXT(otherdev, links)) {
 6113                         if (otherdev != path->device)
 6114                                 break;
 6115                 }
 6116                     
 6117                 if (otherdev != NULL) {
 6118                         /*
 6119                          * Initially assume the same versioning as
 6120                          * prior luns for this target.
 6121                          */
 6122                         path->device->protocol_version =
 6123                             otherdev->protocol_version;
 6124                         path->device->transport_version =
 6125                             otherdev->transport_version;
 6126                 } else {
 6127                         /* Until we know better, opt for safety */
 6128                         path->device->protocol_version = 2;
 6129                         if (path->device->transport == XPORT_SPI)
 6130                                 path->device->transport_version = 2;
 6131                         else
 6132                                 path->device->transport_version = 0;
 6133                 }
 6134         }
 6135 
 6136         /*
 6137          * XXX
 6138          * For a device compliant with SPC-2 we should be able
 6139          * to determine the transport version supported by
 6140          * scrutinizing the version descriptors in the
 6141          * inquiry buffer.
 6142          */
 6143 
 6144         /* Tell the controller what we think */
 6145         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 6146         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 6147         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 6148         cts.transport = path->device->transport;
 6149         cts.transport_version = path->device->transport_version;
 6150         cts.protocol = path->device->protocol;
 6151         cts.protocol_version = path->device->protocol_version;
 6152         cts.proto_specific.valid = 0;
 6153         cts.xport_specific.valid = 0;
 6154         xpt_action((union ccb *)&cts);
 6155 }
 6156 
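      /*
       * Validate a transfer-settings request against what the device and
       * controller can actually do, filling in any fields the caller left
       * unspecified from the current settings, and hand the result to the
       * SIM unless this is only an asynchronous bookkeeping update.
       */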
 6157 static void
 6158 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
 6159                           int async_update)
 6160 {
 6161         struct  ccb_pathinq cpi;
 6162         struct  ccb_trans_settings cur_cts;
 6163         struct  ccb_trans_settings_scsi *scsi;
 6164         struct  ccb_trans_settings_scsi *cur_scsi;
 6165         struct  cam_sim *sim;
 6166         struct  scsi_inquiry_data *inq_data;
 6167 
 6168         if (device == NULL) {
 6169                 cts->ccb_h.status = CAM_PATH_INVALID;
 6170                 xpt_done((union ccb *)cts);
 6171                 return;
 6172         }
 6173 
 6174         if (cts->protocol == PROTO_UNKNOWN
 6175          || cts->protocol == PROTO_UNSPECIFIED) {
 6176                 cts->protocol = device->protocol;
 6177                 cts->protocol_version = device->protocol_version;
 6178         }
 6179 
 6180         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
 6181          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
 6182                 cts->protocol_version = device->protocol_version;
 6183 
 6184         if (cts->protocol != device->protocol) {
 6185                 xpt_print_path(cts->ccb_h.path);
 6186                 printf("Uninitialized Protocol %x:%x?\n",
 6187                        cts->protocol, device->protocol);
 6188                 cts->protocol = device->protocol;
 6189         }
 6190 
 6191         if (cts->protocol_version > device->protocol_version) {
 6192                 if (bootverbose) {
 6193                         xpt_print_path(cts->ccb_h.path);
 6194                         printf("Down revving Protocol Version from %d to %d?\n",
 6195                                cts->protocol_version, device->protocol_version);
 6196                 }
 6197                 cts->protocol_version = device->protocol_version;
 6198         }
 6199 
 6200         if (cts->transport == XPORT_UNKNOWN
 6201          || cts->transport == XPORT_UNSPECIFIED) {
 6202                 cts->transport = device->transport;
 6203                 cts->transport_version = device->transport_version;
 6204         }
 6205 
 6206         if (cts->transport_version == XPORT_VERSION_UNKNOWN
 6207          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
 6208                 cts->transport_version = device->transport_version;
 6209 
 6210         if (cts->transport != device->transport) {
 6211                 xpt_print_path(cts->ccb_h.path);
 6212                 printf("Uninitialized Transport %x:%x?\n",
 6213                        cts->transport, device->transport);
 6214                 cts->transport = device->transport;
 6215         }
 6216 
 6217         if (cts->transport_version > device->transport_version) {
 6218                 if (bootverbose) {
 6219                         xpt_print_path(cts->ccb_h.path);
 6220                         printf("Down revving Transport Version from %d to %d?\n",
 6221                                cts->transport_version,
 6222                                device->transport_version);
 6223                 }
 6224                 cts->transport_version = device->transport_version;
 6225         }
 6226 
 6227         sim = cts->ccb_h.path->bus->sim;
 6228 
 6229         /*
 6230          * Nothing more of interest to do unless
 6231          * this is a device connected via the
 6232          * SCSI protocol.
 6233          */
 6234         if (cts->protocol != PROTO_SCSI) {
 6235                 if (async_update == FALSE) 
 6236                         (*(sim->sim_action))(sim, (union ccb *)cts);
 6237                 return;
 6238         }
 6239 
 6240         inq_data = &device->inq_data;
 6241         scsi = &cts->proto_specific.scsi;
 6242         xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
 6243         cpi.ccb_h.func_code = XPT_PATH_INQ;
 6244         xpt_action((union ccb *)&cpi);
 6245 
 6246         /* SCSI specific sanity checking */
 6247         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
 6248          || (inq_data->flags & SID_CmdQue) == 0
 6249          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
 6250          || (device->quirk->mintags == 0)) {
 6251                 /*
 6252                  * Can't tag on hardware that doesn't support tags,
 6253                  * doesn't have it enabled, or has broken tag support.
 6254                  */
 6255                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6256         }
 6257 
 6258         if (async_update == FALSE) {
 6259                 /*
 6260                  * Perform sanity checking against what the
 6261                  * controller and device can do.
 6262                  */
 6263                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
 6264                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 6265                 cur_cts.type = cts->type;
 6266                 xpt_action((union ccb *)&cur_cts);
 6267 
 6268                 cur_scsi = &cur_cts.proto_specific.scsi;
 6269                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
 6270                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6271                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
 6272                 }
 6273                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
 6274                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6275         }
 6276 
 6277         /* SPI specific sanity checking */
 6278         if (cts->transport == XPORT_SPI && async_update == FALSE) {
 6279                 u_int spi3caps;
 6280                 struct ccb_trans_settings_spi *spi;
 6281                 struct ccb_trans_settings_spi *cur_spi;
 6282 
 6283                 spi = &cts->xport_specific.spi;
 6284 
 6285                 cur_spi = &cur_cts.xport_specific.spi;
 6286 
 6287                 /* Fill in any gaps in what the user gave us */
 6288                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
 6289                         spi->sync_period = cur_spi->sync_period;
 6290                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
 6291                         spi->sync_period = 0;
 6292                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
 6293                         spi->sync_offset = cur_spi->sync_offset;
 6294                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
 6295                         spi->sync_offset = 0;
 6296                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
 6297                         spi->ppr_options = cur_spi->ppr_options;
 6298                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
 6299                         spi->ppr_options = 0;
 6300                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
 6301                         spi->bus_width = cur_spi->bus_width;
 6302                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
 6303                         spi->bus_width = 0;
 6304                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
 6305                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
 6306                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
 6307                 }
 6308                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
 6309                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
 6310                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6311                   && (inq_data->flags & SID_Sync) == 0
 6312                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
 6313                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
 6314                  || (cur_spi->sync_offset == 0)
 6315                  || (cur_spi->sync_period == 0)) {
 6316                         /* Force async */
 6317                         spi->sync_period = 0;
 6318                         spi->sync_offset = 0;
 6319                 }
 6320 
 6321                 switch (spi->bus_width) {
 6322                 case MSG_EXT_WDTR_BUS_32_BIT:
 6323                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6324                           || (inq_data->flags & SID_WBus32) != 0
 6325                           || cts->type == CTS_TYPE_USER_SETTINGS)
 6326                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
 6327                                 break;
 6328                         /* Fall Through to 16-bit */
 6329                 case MSG_EXT_WDTR_BUS_16_BIT:
 6330                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6331                           || (inq_data->flags & SID_WBus16) != 0
 6332                           || cts->type == CTS_TYPE_USER_SETTINGS)
 6333                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
 6334                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
 6335                                 break;
 6336                         }
 6337                         /* Fall Through to 8-bit */
 6338                 default: /* New bus width?? */
 6339                 case MSG_EXT_WDTR_BUS_8_BIT:
 6340                         /* All targets can do this */
 6341                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
 6342                         break;
 6343                 }
 6344 
 6345                 spi3caps = cpi.xport_specific.spi.ppr_options;
 6346                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6347                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
 6348                         spi3caps &= inq_data->spi3data;
 6349 
 6350                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
 6351                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
 6352 
 6353                 if ((spi3caps & SID_SPI_IUS) == 0)
 6354                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
 6355 
 6356                 if ((spi3caps & SID_SPI_QAS) == 0)
 6357                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
 6358 
 6359                 /* PPR options are only valid on a wide bus */
 6360                 if (spi->bus_width == 0)
 6361                         spi->ppr_options = 0;
 6362 
 6363                 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
 6364                         /*
 6365                          * Can't tag queue without disconnection.
 6366                          */
 6367                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6368                         scsi->valid |= CTS_SCSI_VALID_TQ;
 6369                 }
 6370 
 6371                 /*
 6372                  * If we are currently performing tagged transactions to
 6373                  * this device and want to change its negotiation parameters,
 6374                  * go non-tagged for a bit to give the controller a chance to
 6375                  * negotiate unhampered by tag messages.
 6376                  */
 6377                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
 6378                  && (device->inq_flags & SID_CmdQue) != 0
 6379                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
 6380                  && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
 6381                                    CTS_SPI_VALID_SYNC_OFFSET|
 6382                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
 6383                         xpt_toggle_tags(cts->ccb_h.path);
 6384         }
 6385 
 6386         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
 6387          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
 6388                 int device_tagenb;
 6389 
 6390                 /*
 6391                  * If we are transitioning from tags to no-tags or
 6392                  * vice-versa, we need to carefully freeze and restart
 6393                  * the queue so that we don't overlap tagged and non-tagged
 6394                  * commands.  We also temporarily stop tags if there is
 6395                  * a change in transfer negotiation settings to allow
 6396                  * "tag-less" negotiation.
 6397                  */
 6398                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6399                  || (device->inq_flags & SID_CmdQue) != 0)
 6400                         device_tagenb = TRUE;
 6401                 else
 6402                         device_tagenb = FALSE;
 6403 
 6404                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
 6405                   && device_tagenb == FALSE)
 6406                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
 6407                   && device_tagenb == TRUE)) {
 6408 
 6409                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
 6410                                 /*
 6411                                  * Delay change to use tags until after a
 6412                                  * few commands have gone to this device so
 6413                                  * the controller has time to perform transfer
 6414                                  * negotiations without tagged messages getting
 6415                                  * in the way.
 6416                                  */
 6417                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
 6418                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
 6419                         } else {
 6420                                 struct ccb_relsim crs;
 6421 
 6422                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
 6423                                 device->inq_flags &= ~SID_CmdQue;
 6424                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
 6425                                                     sim->max_dev_openings);
 6426                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6427                                 device->tag_delay_count = 0;
 6428 
 6429                                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
 6430                                               /*priority*/1);
 6431                                 crs.ccb_h.func_code = XPT_REL_SIMQ;
 6432                                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6433                                 crs.openings
 6434                                     = crs.release_timeout 
 6435                                     = crs.qfrozen_cnt
 6436                                     = 0;
 6437                                 xpt_action((union ccb *)&crs);
 6438                         }
 6439                 }
 6440         }
 6441         if (async_update == FALSE) 
 6442                 (*(sim->sim_action))(sim, (union ccb *)cts);
 6443 }
 6444 
 6445 #else /* CAM_NEW_TRAN_CODE */
 6446 
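      /*
       * Pre-CAM_NEW_TRAN_CODE flavor of the above: the same sanity checks
       * expressed against the flat ccb_trans_settings fields rather than
       * the protocol/transport-specific structures.
       */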
 6447 static void
 6448 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
 6449                           int async_update)
 6450 {
 6451         struct  cam_sim *sim;
 6452         int     qfrozen;
 6453 
 6454         sim = cts->ccb_h.path->bus->sim;
 6455         if (async_update == FALSE) {
 6456                 struct  scsi_inquiry_data *inq_data;
 6457                 struct  ccb_pathinq cpi;
 6458                 struct  ccb_trans_settings cur_cts;
 6459 
 6460                 if (device == NULL) {
 6461                         cts->ccb_h.status = CAM_PATH_INVALID;
 6462                         xpt_done((union ccb *)cts);
 6463                         return;
 6464                 }
 6465 
 6466                 /*
 6467                  * Perform sanity checking against what the
 6468                  * controller and device can do.
 6469                  */
 6470                 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
 6471                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 6472                 xpt_action((union ccb *)&cpi);
 6473                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
 6474                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 6475                 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
 6476                 xpt_action((union ccb *)&cur_cts);
 6477                 inq_data = &device->inq_data;
 6478 
 6479                 /* Fill in any gaps in what the user gave us */
 6480                 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
 6481                         cts->sync_period = cur_cts.sync_period;
 6482                 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
 6483                         cts->sync_offset = cur_cts.sync_offset;
 6484                 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
 6485                         cts->bus_width = cur_cts.bus_width;
 6486                 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
 6487                         cts->flags &= ~CCB_TRANS_DISC_ENB;
 6488                         cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
 6489                 }
 6490                 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
 6491                         cts->flags &= ~CCB_TRANS_TAG_ENB;
 6492                         cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
 6493                 }
 6494 
 6495                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6496                   && (inq_data->flags & SID_Sync) == 0)
 6497                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
 6498                  || (cts->sync_offset == 0)
 6499                  || (cts->sync_period == 0)) {
 6500                         /* Force async */
 6501                         cts->sync_period = 0;
 6502                         cts->sync_offset = 0;
 6503                 } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6504                         && (inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
 6505                         && cts->sync_period <= 0x9) {
 6506                         /*
 6507                          * Don't allow DT transmission rates if the
 6508                          * device does not support DT.
 6509                          */
 6510                         cts->sync_period = 0xa;
 6511                 }
 6512 
 6513                 switch (cts->bus_width) {
 6514                 case MSG_EXT_WDTR_BUS_32_BIT:
 6515                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6516                           || (inq_data->flags & SID_WBus32) != 0)
 6517                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
 6518                                 break;
 6519                         /* FALLTHROUGH to 16-bit */
 6520                 case MSG_EXT_WDTR_BUS_16_BIT:
 6521                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6522                           || (inq_data->flags & SID_WBus16) != 0)
 6523                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
 6524                                 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
 6525                                 break;
 6526                         }
 6527                         /* FALLTHROUGH to 8-bit */
 6528                 default: /* Unknown bus width?  Fall back to 8-bit. */
 6529                 case MSG_EXT_WDTR_BUS_8_BIT:
 6530                         /* All targets can do this */
 6531                         cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
 6532                         break;
 6533                 }
 6534 
 6535                 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
 6536                         /*
 6537                          * Can't tag queue without disconnection.
 6538                          */
 6539                         cts->flags &= ~CCB_TRANS_TAG_ENB;
 6540                         cts->valid |= CCB_TRANS_TQ_VALID;
 6541                 }
 6542 
 6543                 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
 6544                  || (inq_data->flags & SID_CmdQue) == 0
 6545                  || (device->queue_flags & SCP_QUEUE_DQUE) != 0
 6546                  || (device->quirk->mintags == 0)) {
 6547                         /*
 6548                          * Can't tag on hardware that doesn't support it,
 6549                          * doesn't have it enabled, or has broken tag support.
 6550                          */
 6551                         cts->flags &= ~CCB_TRANS_TAG_ENB;
 6552                 }
 6553         }
 6554 
 6555         qfrozen = FALSE;
 6556         if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
 6557                 int device_tagenb;
 6558 
 6559                 /*
 6560                  * If we are transitioning from tags to no-tags or
 6561                  * vice-versa, we need to carefully freeze and restart
 6562                  * the queue so that we don't overlap tagged and non-tagged
 6563                  * commands.  We also temporarily stop tags if there is
 6564                  * a change in transfer negotiation settings to allow
 6565                  * "tag-less" negotiation.
 6566                  */
 6567                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6568                  || (device->inq_flags & SID_CmdQue) != 0)
 6569                         device_tagenb = TRUE;
 6570                 else
 6571                         device_tagenb = FALSE;
 6572 
 6573                 if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
 6574                   && device_tagenb == FALSE)
 6575                  || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
 6576                   && device_tagenb == TRUE)) {
 6577 
 6578                         if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
 6579                                 /*
 6580                                  * Delay change to use tags until after a
 6581                                  * few commands have gone to this device so
 6582                                  * the controller has time to perform transfer
 6583                                  * negotiations without tagged messages getting
 6584                                  * in the way.
 6585                                  */
 6586                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
 6587                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
 6588                         } else {
 6589                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
 6590                                 qfrozen = TRUE;
 6591                                 device->inq_flags &= ~SID_CmdQue;
 6592                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
 6593                                                     sim->max_dev_openings);
 6594                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6595                                 device->tag_delay_count = 0;
 6596                         }
 6597                 }
 6598         }
 6599 
 6600         if (async_update == FALSE) {
 6601                 /*
 6602                  * If we are currently performing tagged transactions to
 6603                  * this device and want to change its negotiation parameters,
 6604                  * go non-tagged for a bit to give the controller a chance to
 6605                  * negotiate unhampered by tag messages.
 6606                  */
 6607                 if ((device->inq_flags & SID_CmdQue) != 0
 6608                  && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
 6609                                    CCB_TRANS_SYNC_OFFSET_VALID|
 6610                                    CCB_TRANS_BUS_WIDTH_VALID)) != 0)
 6611                         xpt_toggle_tags(cts->ccb_h.path);
 6612 
 6613                 (*(sim->sim_action))(sim, (union ccb *)cts);
 6614         }
 6615 
 6616         if (qfrozen) {
 6617                 struct ccb_relsim crs;
 6618 
 6619                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
 6620                               /*priority*/1);
 6621                 crs.ccb_h.func_code = XPT_REL_SIMQ;
 6622                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6623                 crs.openings
 6624                     = crs.release_timeout 
 6625                     = crs.qfrozen_cnt
 6626                     = 0;
 6627                 xpt_action((union ccb *)&crs);
 6628         }
 6629 }
 6630 
 6631 
 6632 #endif /* CAM_NEW_TRAN_CODE */
 6633 
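/*
 * Editor's note: the freeze / resize / release-when-empty sequence above
 * appears three times in this file (both versions of
 * xpt_set_transfer_settings() and xpt_start_tags() below).  The sketch that
 * follows consolidates the idiom for clarity; the helper name
 * xpt_quiesce_and_resize() is hypothetical and is NOT part of this file.
 */
static void
xpt_quiesce_and_resize(struct cam_path *path, int new_openings)
{
        struct ccb_relsim crs;

        /* Block new commands to the device while its queue is resized. */
        xpt_freeze_devq(path, /*count*/1);
        xpt_dev_ccbq_resize(path, new_openings);

        /*
         * Ask the transport layer to thaw the queue only after all
         * outstanding commands drain, so commands issued under the old
         * and new settings never overlap on the bus.
         */
        xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
        crs.ccb_h.func_code = XPT_REL_SIMQ;
        crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
        crs.openings = crs.release_timeout = crs.qfrozen_cnt = 0;
        xpt_action((union ccb *)&crs);
}
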
 6634 static void
 6635 xpt_toggle_tags(struct cam_path *path)
 6636 {
 6637         struct cam_ed *dev;
 6638 
 6639         /*
 6640          * Give controllers a chance to renegotiate
 6641          * before starting tag operations.  We
 6642          * "toggle" tagged queuing off and then on,
 6643          * which causes the tag-enable command delay
 6644          * counter to take effect.
 6645          */
 6646         dev = path->device;
 6647         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6648          || ((dev->inq_flags & SID_CmdQue) != 0
 6649           && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
 6650                 struct ccb_trans_settings cts;
 6651 
 6652                 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 6653 #ifdef CAM_NEW_TRAN_CODE
 6654                 cts.protocol = PROTO_SCSI;
 6655                 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
 6656                 cts.transport = XPORT_UNSPECIFIED;
 6657                 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
 6658                 cts.proto_specific.scsi.flags = 0;
 6659                 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
 6660 #else /* CAM_NEW_TRAN_CODE */
 6661                 cts.flags = 0;
 6662                 cts.valid = CCB_TRANS_TQ_VALID;
 6663 #endif /* CAM_NEW_TRAN_CODE */
 6664                 xpt_set_transfer_settings(&cts, path->device,
 6665                                           /*async_update*/TRUE);
 6666 #ifdef CAM_NEW_TRAN_CODE
 6667                 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
 6668 #else /* CAM_NEW_TRAN_CODE */
 6669                 cts.flags = CCB_TRANS_TAG_ENB;
 6670 #endif /* CAM_NEW_TRAN_CODE */
 6671                 xpt_set_transfer_settings(&cts, path->device,
 6672                                           /*async_update*/TRUE);
 6673         }
 6674 }
 6675 
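/*
 * Editor's note: the off/on toggle above works because the two calls take
 * opposite branches of xpt_set_transfer_settings().  Clearing TAG_ENB
 * freezes the device queue, strips SID_CmdQue, and shrinks the queue to the
 * untagged opening count; setting TAG_ENB again does not re-enable tags
 * immediately but arms device->tag_delay_count, so tags resume only after
 * CAM_TAG_DELAY_COUNT commands complete (see camisr() and xpt_start_tags()
 * below).
 */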
 6676 static void
 6677 xpt_start_tags(struct cam_path *path)
 6678 {
 6679         struct ccb_relsim crs;
 6680         struct cam_ed *device;
 6681         struct cam_sim *sim;
 6682         int    newopenings;
 6683 
 6684         device = path->device;
 6685         sim = path->bus->sim;
 6686         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6687         xpt_freeze_devq(path, /*count*/1);
 6688         device->inq_flags |= SID_CmdQue;
 6689         newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
 6690         xpt_dev_ccbq_resize(path, newopenings);
 6691         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
 6692         crs.ccb_h.func_code = XPT_REL_SIMQ;
 6693         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6694         crs.openings
 6695             = crs.release_timeout 
 6696             = crs.qfrozen_cnt
 6697             = 0;
 6698         xpt_action((union ccb *)&crs);
 6699 }
 6700 
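/*
 * Editor's note: boot-time bus configuration is driven by the two counters
 * below.  xpt_config() tallies the busses to probe (and those worth
 * resetting), xptconfigfunc() issues the path inquiries and resets, and
 * every completion funnels into xpt_finishconfig(), which decrements
 * busses_to_config and, once it reaches zero, starts the peripheral drivers
 * and releases the boot-time config_intrhook.
 */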
 6701 static int busses_to_config;
 6702 static int busses_to_reset;
 6703 
 6704 static int
 6705 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
 6706 {
 6707         if (bus->path_id != CAM_XPT_PATH_ID) {
 6708                 struct cam_path path;
 6709                 struct ccb_pathinq cpi;
 6710                 int can_negotiate;
 6711 
 6712                 busses_to_config++;
 6713                 xpt_compile_path(&path, NULL, bus->path_id,
 6714                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 6715                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 6716                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 6717                 xpt_action((union ccb *)&cpi);
 6718                 can_negotiate = cpi.hba_inquiry;
 6719                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
 6720                 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
 6721                  && can_negotiate)
 6722                         busses_to_reset++;
 6723                 xpt_release_path(&path);
 6724         }
 6725 
 6726         return(1);
 6727 }
 6728 
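/*
 * Editor's note: a bus is only counted for reset when the HBA both allows
 * bus resets (PIM_NOBUSRESET clear) and can renegotiate transfer settings
 * (wide or sync capable).  A reset returns targets to their default
 * asynchronous, narrow settings, so it is only worth the settle delay if
 * the controller can negotiate them back up.
 */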
 6729 static int
 6730 xptconfigfunc(struct cam_eb *bus, void *arg)
 6731 {
 6732         struct  cam_path *path;
 6733         union   ccb *work_ccb;
 6734 
 6735         if (bus->path_id != CAM_XPT_PATH_ID) {
 6736                 cam_status status;
 6737                 int can_negotiate;
 6738 
 6739                 work_ccb = xpt_alloc_ccb();
 6740                 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
 6741                                               CAM_TARGET_WILDCARD,
 6742                                               CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
 6743                         printf("xptconfigfunc: xpt_create_path failed with "
 6744                                "status %#x for bus %d\n", status, bus->path_id);
 6745                         printf("xptconfigfunc: halting bus configuration\n");
 6746                         xpt_free_ccb(work_ccb);
 6747                         busses_to_config--;
 6748                         xpt_finishconfig(xpt_periph, NULL);
 6749                         return(0);
 6750                 }
 6751                 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
 6752                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 6753                 xpt_action(work_ccb);
 6754                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
 6755                         printf("xptconfigfunc: CPI failed on bus %d "
 6756                                "with status %#x\n", bus->path_id,
 6757                                work_ccb->ccb_h.status);
 6758                         xpt_finishconfig(xpt_periph, work_ccb);
 6759                         return(1);
 6760                 }
 6761 
 6762                 can_negotiate = work_ccb->cpi.hba_inquiry;
 6763                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
 6764                 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
 6765                  && (can_negotiate != 0)) {
 6766                         xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
 6767                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
 6768                         work_ccb->ccb_h.cbfcnp = NULL;
 6769                         CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
 6770                                   ("Resetting Bus\n"));
 6771                         xpt_action(work_ccb);
 6772                         xpt_finishconfig(xpt_periph, work_ccb);
 6773                 } else {
 6774                         /* Act as though we performed a successful BUS RESET */
 6775                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
 6776                         xpt_finishconfig(xpt_periph, work_ccb);
 6777                 }
 6778         }
 6779 
 6780         return(1);
 6781 }
 6782 
 6783 static void
 6784 xpt_config(void *arg)
 6785 {
 6786         /*
 6787          * Now that interrupts are enabled, go find our devices
 6788          */
 6789 
 6790 #ifdef CAMDEBUG
 6791         /* Setup debugging flags and path */
 6792 #ifdef CAM_DEBUG_FLAGS
 6793         cam_dflags = CAM_DEBUG_FLAGS;
 6794 #else /* !CAM_DEBUG_FLAGS */
 6795         cam_dflags = CAM_DEBUG_NONE;
 6796 #endif /* CAM_DEBUG_FLAGS */
 6797 #ifdef CAM_DEBUG_BUS
 6798         if (cam_dflags != CAM_DEBUG_NONE) {
 6799                 if (xpt_create_path(&cam_dpath, xpt_periph,
 6800                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 6801                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 6802                         printf("xpt_config: xpt_create_path() failed for debug"
 6803                                " target %d:%d:%d, debugging disabled\n",
 6804                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 6805                         cam_dflags = CAM_DEBUG_NONE;
 6806                 }
 6807         } else
 6808                 cam_dpath = NULL;
 6809 #else /* !CAM_DEBUG_BUS */
 6810         cam_dpath = NULL;
 6811 #endif /* CAM_DEBUG_BUS */
 6812 #endif /* CAMDEBUG */
 6813 
 6814         /*
 6815          * Scan all installed busses.
 6816          */
 6817         xpt_for_all_busses(xptconfigbuscountfunc, NULL);
 6818 
 6819         if (busses_to_config == 0) {
 6820                 /* Call manually because we don't have any busses */
 6821                 xpt_finishconfig(xpt_periph, NULL);
 6822         } else  {
 6823                 if (busses_to_reset > 0 && scsi_delay >= 2000) {
 6824                         printf("Waiting %d seconds for SCSI "
 6825                                "devices to settle\n", scsi_delay/1000);
 6826                 }
 6827                 xpt_for_all_busses(xptconfigfunc, NULL);
 6828         }
 6829 }
 6830 
 6831 /*
 6832  * If the given device only has one peripheral attached to it, and if that
 6833  * peripheral is the passthrough driver, announce it.  This ensures that the
 6834  * user sees some sort of announcement for every peripheral in their system.
 6835  */
 6836 static int
 6837 xptpassannouncefunc(struct cam_ed *device, void *arg)
 6838 {
 6839         struct cam_periph *periph;
 6840         int i;
 6841 
 6842         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 6843              periph = SLIST_NEXT(periph, periph_links), i++);
 6844 
 6845         periph = SLIST_FIRST(&device->periphs);
 6846         if ((i == 1)
 6847          && (strncmp(periph->periph_name, "pass", 4) == 0))
 6848                 xpt_announce_periph(periph, NULL);
 6849 
 6850         return(1);
 6851 }
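/*
 * Editor's note: the empty-body for loop above just counts the peripherals
 * attached to the device.  A sketch of the equivalent, perhaps more
 * familiar, sys/queue.h form (not in the original):
 *
 *      i = 0;
 *      SLIST_FOREACH(periph, &device->periphs, periph_links)
 *              i++;
 */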
 6852 
 6853 static void
 6854 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
 6855 {
 6856         struct  periph_driver **p_drv;
 6857         int     i;
 6858 
 6859         if (done_ccb != NULL) {
 6860                 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 6861                           ("xpt_finishconfig\n"));
 6862                 switch(done_ccb->ccb_h.func_code) {
 6863                 case XPT_RESET_BUS:
 6864                         if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
 6865                                 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
 6866                                 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
 6867                                 xpt_action(done_ccb);
 6868                                 return;
 6869                         }
 6870                         /* FALLTHROUGH */
 6871                 case XPT_SCAN_BUS:
 6872                 default:
 6873                         xpt_free_path(done_ccb->ccb_h.path);
 6874                         busses_to_config--;
 6875                         break;
 6876                 }
 6877         }
 6878 
 6879         if (busses_to_config == 0) {
 6880                 /* Register all the peripheral drivers */
 6881                 /* XXX This will have to change when we have loadable modules */
 6882                 p_drv = periph_drivers;
 6883                 for (i = 0; p_drv[i] != NULL; i++) {
 6884                         (*p_drv[i]->init)();
 6885                 }
 6886 
 6887                 /*
 6888                  * Check for devices with no "standard" peripheral driver
 6889                  * attached.  For any devices like that, announce the
 6890                  * passthrough driver so the user will see something.
 6891                  */
 6892                 xpt_for_all_devices(xptpassannouncefunc, NULL);
 6893 
 6894                 /* Release our hook so that the boot can continue. */
 6895                 config_intrhook_disestablish(xpt_config_hook);
 6896                 free(xpt_config_hook, M_TEMP);
 6897                 xpt_config_hook = NULL;
 6898         }
 6899         if (done_ccb != NULL)
 6900                 xpt_free_ccb(done_ccb);
 6901 }
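/*
 * Editor's note: note the continuation trick above.  xpt_finishconfig() is
 * its own CCB callback: a completed XPT_RESET_BUS CCB is recycled in place
 * into an XPT_SCAN_BUS request, so each bus is reset and then scanned with
 * a single CCB allocation before being counted as configured.
 */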
 6902 
 6903 static void
 6904 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 6905 {
 6906         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 6907 
 6908         switch (work_ccb->ccb_h.func_code) {
 6909         /* Common cases first */
 6910         case XPT_PATH_INQ:              /* Path routing inquiry */
 6911         {
 6912                 struct ccb_pathinq *cpi;
 6913 
 6914                 cpi = &work_ccb->cpi;
 6915                 cpi->version_num = 1; /* XXX??? */
 6916                 cpi->hba_inquiry = 0;
 6917                 cpi->target_sprt = 0;
 6918                 cpi->hba_misc = 0;
 6919                 cpi->hba_eng_cnt = 0;
 6920                 cpi->max_target = 0;
 6921                 cpi->max_lun = 0;
 6922                 cpi->initiator_id = 0;
 6923                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 6924                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
 6925                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 6926                 cpi->unit_number = sim->unit_number;
 6927                 cpi->bus_id = sim->bus_id;
 6928                 cpi->base_transfer_speed = 0;
 6929 #ifdef CAM_NEW_TRAN_CODE
 6930                 cpi->protocol = PROTO_UNSPECIFIED;
 6931                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
 6932                 cpi->transport = XPORT_UNSPECIFIED;
 6933                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
 6934 #endif /* CAM_NEW_TRAN_CODE */
 6935                 cpi->ccb_h.status = CAM_REQ_CMP;
 6936                 xpt_done(work_ccb);
 6937                 break;
 6938         }
 6939         default:
 6940                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 6941                 xpt_done(work_ccb);
 6942                 break;
 6943         }
 6944 }
 6945 
 6946 /*
 6947  * The xpt as a "controller" has no interrupt sources, so polling
 6948  * is a no-op.
 6949  */
 6950 static void
 6951 xptpoll(struct cam_sim *sim)
 6952 {
 6953 }
 6954 
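/*
 * Editor's note: camisr() is the consumer half of CAM's completion path.
 * xpt_done() (defined earlier in this file) appends finished CCBs to the
 * interrupt-side queue handed in here and schedules the software interrupt;
 * camisr() then drains the queue at splcam(), doing the bookkeeping below
 * outside the SIM's hardware interrupt context.
 */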
 6955 static void
 6956 camisr(void *V_queue)
 6957 {
 6958         cam_isrq_t *queue = V_queue;
 6959         int     s;
 6960         struct  ccb_hdr *ccb_h;
 6961 
 6962         s = splcam();
 6963         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
 6964                 int     runq;
 6965 
 6966                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
 6967                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 6968                 splx(s);
 6969 
 6970                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
 6971                           ("camisr\n"));
 6972 
 6973                 runq = FALSE;
 6974 
 6975                 if (ccb_h->flags & CAM_HIGH_POWER) {
 6976                         struct highpowerlist    *hphead;
 6977                         union ccb               *send_ccb;
 6978 
 6979                         hphead = &highpowerq;
 6980 
 6981                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
 6982 
 6983                         /*
 6984                          * This high-power command is done, so a slot opens up.
 6985                          */
 6986                         num_highpower++;
 6987 
 6988                         /* 
 6989                          * Any high powered commands queued up?
 6990                          */
 6991                         if (send_ccb != NULL) {
 6992 
 6993                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
 6994 
 6995                                 xpt_release_devq(send_ccb->ccb_h.path,
 6996                                                  /*count*/1, /*runqueue*/TRUE);
 6997                         }
 6998                 }
 6999                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 7000                         struct cam_ed *dev;
 7001 
 7002                         dev = ccb_h->path->device;
 7003 
 7004                         s = splcam();
 7005                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 7006 
 7007                         ccb_h->path->bus->sim->devq->send_active--;
 7008                         ccb_h->path->bus->sim->devq->send_openings++;
 7009                         splx(s);
 7010                         
 7011                         if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 7012                           && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
 7013                          || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 7014                           && (dev->ccbq.dev_active == 0))) {
 7015                                 
 7016                                 xpt_release_devq(ccb_h->path, /*count*/1,
 7017                                                  /*run_queue*/TRUE);
 7018                         }
 7019 
 7020                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 7021                          && (--dev->tag_delay_count == 0))
 7022                                 xpt_start_tags(ccb_h->path);
 7023 
 7024                         if ((dev->ccbq.queue.entries > 0)
 7025                          && (dev->qfrozen_cnt == 0)
 7026                          && (device_is_send_queued(dev) == 0)) {
 7027                                 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
 7028                                                               dev);
 7029                         }
 7030                 }
 7031 
 7032                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
 7033                         xpt_release_simq(ccb_h->path->bus->sim,
 7034                                          /*run_queue*/TRUE);
 7035                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
 7036                         runq = FALSE;
 7037                 } 
 7038 
 7039                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 7040                  && (ccb_h->status & CAM_DEV_QFRZN)) {
 7041                         xpt_release_devq(ccb_h->path, /*count*/1,
 7042                                          /*run_queue*/TRUE);
 7043                         ccb_h->status &= ~CAM_DEV_QFRZN;
 7044                 } else if (runq) {
 7045                         xpt_run_dev_sendq(ccb_h->path->bus);
 7046                 }
 7047 
 7048                 /* Call the peripheral driver's callback */
 7049                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 7050 
 7051                 /* Raise IPL again for the while-loop test. */
 7052                 s = splcam();
 7053         }
 7054         splx(s);
 7055 }
