The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Implementation of the Common Access Method Transport (XPT) layer.
    3  *
    4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
    5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  * $FreeBSD: releng/5.1/sys/cam/cam_xpt.c 111979 2003-03-08 08:01:31Z phk $
   30  */
   31 #include <sys/param.h>
   32 #include <sys/bus.h>
   33 #include <sys/systm.h>
   34 #include <sys/types.h>
   35 #include <sys/malloc.h>
   36 #include <sys/kernel.h>
   37 #include <sys/time.h>
   38 #include <sys/conf.h>
   39 #include <sys/fcntl.h>
   40 #include <sys/md5.h>
   41 #include <sys/interrupt.h>
   42 #include <sys/sbuf.h>
   43 
   44 #ifdef PC98
   45 #include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
   46 #endif
   47 
   48 #include <cam/cam.h>
   49 #include <cam/cam_ccb.h>
   50 #include <cam/cam_periph.h>
   51 #include <cam/cam_sim.h>
   52 #include <cam/cam_xpt.h>
   53 #include <cam/cam_xpt_sim.h>
   54 #include <cam/cam_xpt_periph.h>
   55 #include <cam/cam_debug.h>
   56 
   57 #include <cam/scsi/scsi_all.h>
   58 #include <cam/scsi/scsi_message.h>
   59 #include <cam/scsi/scsi_pass.h>
   60 #include "opt_cam.h"
   61 
   62 /* Datastructures internal to the xpt layer */
   63 
   64 /*
   65  * Definition of an async handler callback block.  These are used to add
   66  * SIMs and peripherals to the async callback lists.
   67  */
   68 struct async_node {
   69         SLIST_ENTRY(async_node) links;
   70         u_int32_t       event_enable;   /* Async Event enables */
   71         void            (*callback)(void *arg, u_int32_t code,
   72                                     struct cam_path *path, void *args);
   73         void            *callback_arg;
   74 };
   75 
   76 SLIST_HEAD(async_list, async_node);
   77 SLIST_HEAD(periph_list, cam_periph);
   78 static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
   79 
   80 /*
   81  * This is the maximum number of high powered commands (e.g. start unit)
   82  * that can be outstanding at a particular time.
   83  */
   84 #ifndef CAM_MAX_HIGHPOWER
   85 #define CAM_MAX_HIGHPOWER  4
   86 #endif
   87 
   88 /* number of high powered commands that can go through right now */
   89 static int num_highpower = CAM_MAX_HIGHPOWER;
   90 
   91 /*
   92  * Structure for queueing a device in a run queue.
   93  * There is one run queue for allocating new ccbs,
   94  * and another for sending ccbs to the controller.
   95  */
   96 struct cam_ed_qinfo {
   97         cam_pinfo pinfo;
   98         struct    cam_ed *device;
   99 };
  100 
  101 /*
  102  * The CAM EDT (Existing Device Table) contains the device information for
  103  * all devices for all busses in the system.  The table contains a
  104  * cam_ed structure for each device on the bus.
  105  */
  106 struct cam_ed {
  107         TAILQ_ENTRY(cam_ed) links;
  108         struct  cam_ed_qinfo alloc_ccb_entry;
  109         struct  cam_ed_qinfo send_ccb_entry;
  110         struct  cam_et   *target;
  111         lun_id_t         lun_id;
  112         struct  camq drvq;              /*
  113                                          * Queue of type drivers wanting to do
  114                                          * work on this device.
  115                                          */
  116         struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
  117         struct  async_list asyncs;      /* Async callback info for this B/T/L */
  118         struct  periph_list periphs;    /* All attached devices */
  119         u_int   generation;             /* Generation number */
  120         struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
  121         struct  xpt_quirk_entry *quirk; /* Oddities about this device */
  122                                         /* Storage for the inquiry data */
  123 #ifdef CAM_NEW_TRAN_CODE
  124         cam_proto        protocol;
  125         u_int            protocol_version;
  126         cam_xport        transport;
  127         u_int            transport_version;
  128 #endif /* CAM_NEW_TRAN_CODE */
  129         struct           scsi_inquiry_data inq_data;
  130         u_int8_t         inq_flags;     /*
  131                                          * Current settings for inquiry flags.
  132                                          * This allows us to override settings
  133                                          * like disconnection and tagged
  134                                          * queuing for a device.
  135                                          */
  136         u_int8_t         queue_flags;   /* Queue flags from the control page */
  137         u_int8_t         serial_num_len;
  138         u_int8_t        *serial_num;
  139         u_int32_t        qfrozen_cnt;
  140         u_int32_t        flags;
  141 #define CAM_DEV_UNCONFIGURED            0x01
  142 #define CAM_DEV_REL_TIMEOUT_PENDING     0x02
  143 #define CAM_DEV_REL_ON_COMPLETE         0x04
  144 #define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
  145 #define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
  146 #define CAM_DEV_TAG_AFTER_COUNT         0x20
  147 #define CAM_DEV_INQUIRY_DATA_VALID      0x40
  148         u_int32_t        tag_delay_count;
  149 #define CAM_TAG_DELAY_COUNT             5
  150         u_int32_t        refcount;
  151         struct           callout_handle c_handle;
  152 };
  153 
  154 /*
  155  * Each target is represented by an ET (Existing Target).  These
  156  * entries are created when a target is successfully probed with an
  157  * identify, and removed when a device fails to respond after a number
  158  * of retries, or a bus rescan finds the device missing.
  159  */
  160 struct cam_et { 
  161         TAILQ_HEAD(, cam_ed) ed_entries;
  162         TAILQ_ENTRY(cam_et) links;
  163         struct  cam_eb  *bus;   
  164         target_id_t     target_id;
  165         u_int32_t       refcount;       
  166         u_int           generation;
  167         struct          timeval last_reset;
  168 };
  169 
  170 /*
  171  * Each bus is represented by an EB (Existing Bus).  These entries
  172  * are created by calls to xpt_bus_register and deleted by calls to
  173  * xpt_bus_deregister.
  174  */
  175 struct cam_eb { 
  176         TAILQ_HEAD(, cam_et) et_entries;
  177         TAILQ_ENTRY(cam_eb)  links;
  178         path_id_t            path_id;
  179         struct cam_sim       *sim;
  180         struct timeval       last_reset;
  181         u_int32_t            flags;
  182 #define CAM_EB_RUNQ_SCHEDULED   0x01
  183         u_int32_t            refcount;
  184         u_int                generation;
  185 };
  186 
  187 struct cam_path {
  188         struct cam_periph *periph;
  189         struct cam_eb     *bus;
  190         struct cam_et     *target;
  191         struct cam_ed     *device;
  192 };
  193 
  194 struct xpt_quirk_entry {
  195         struct scsi_inquiry_pattern inq_pat;
  196         u_int8_t quirks;
  197 #define CAM_QUIRK_NOLUNS        0x01
  198 #define CAM_QUIRK_NOSERIAL      0x02
  199 #define CAM_QUIRK_HILUNS        0x04
  200         u_int mintags;
  201         u_int maxtags;
  202 };
  203 #define CAM_SCSI2_MAXLUN        8
  204 
  205 typedef enum {
  206         XPT_FLAG_OPEN           = 0x01
  207 } xpt_flags;
  208 
  209 struct xpt_softc {
  210         xpt_flags       flags;
  211         u_int32_t       generation;
  212 };
  213 
  214 static const char quantum[] = "QUANTUM";
  215 static const char sony[] = "SONY";
  216 static const char west_digital[] = "WDIGTL";
  217 static const char samsung[] = "SAMSUNG";
  218 static const char seagate[] = "SEAGATE";
  219 static const char microp[] = "MICROP";
  220 
  221 static struct xpt_quirk_entry xpt_quirk_table[] = 
  222 {
  223         {
  224                 /* Reports QUEUE FULL for temporary resource shortages */
  225                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
  226                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  227         },
  228         {
  229                 /* Reports QUEUE FULL for temporary resource shortages */
  230                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
  231                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  232         },
  233         {
  234                 /* Reports QUEUE FULL for temporary resource shortages */
  235                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
  236                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  237         },
  238         {
  239                 /* Broken tagged queuing drive */
  240                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
  241                 /*quirks*/0, /*mintags*/0, /*maxtags*/
  242         },
  243         {
  244                 /* Broken tagged queuing drive */
  245                 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
  246                 /*quirks*/0, /*mintags*/0, /*maxtags*/
  247         },
  248         {
  249                 /* Broken tagged queuing drive */
  250                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
  251                 /*quirks*/0, /*mintags*/0, /*maxtags*/
  252         },
  253         {
  254                 /*
  255                  * Unfortunately, the Quantum Atlas III has the same
  256                  * problem as the Atlas II drives above.
  257                  * Reported by: "Johan Granlund" <johan@granlund.nu>
  258                  *
  259                  * For future reference, the drive with the problem was:
  260                  * QUANTUM QM39100TD-SW N1B0
  261                  * 
  262                  * It's possible that Quantum will fix the problem in later
  263                  * firmware revisions.  If that happens, the quirk entry
  264                  * will need to be made specific to the firmware revisions
  265                  * with the problem.
  266                  * 
  267                  */
  268                 /* Reports QUEUE FULL for temporary resource shortages */
  269                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
  270                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  271         },
  272         {
  273                 /*
  274                  * 18 Gig Atlas III, same problem as the 9G version.
  275                  * Reported by: Andre Albsmeier
  276                  *              <andre.albsmeier@mchp.siemens.de>
  277                  *
  278                  * For future reference, the drive with the problem was:
  279                  * QUANTUM QM318000TD-S N491
  280                  */
  281                 /* Reports QUEUE FULL for temporary resource shortages */
  282                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
  283                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  284         },
  285         {
  286                 /*
  287                  * Broken tagged queuing drive
  288                  * Reported by: Bret Ford <bford@uop.cs.uop.edu>
  289                  *         and: Martin Renters <martin@tdc.on.ca>
  290                  */
  291                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
  292                 /*quirks*/0, /*mintags*/0, /*maxtags*/
  293         },
  294                 /*
  295                  * The Seagate Medalist Pro drives have very poor write
  296                  * performance with anything more than 2 tags.
  297                  * 
  298                  * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
  299                  * Drive:  <SEAGATE ST36530N 1444>
  300                  *
  301                  * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
  302                  * Drive:  <SEAGATE ST34520W 1281>
  303                  *
  304                  * No one has actually reported that the 9G version
  305                  * (ST39140*) of the Medalist Pro has the same problem, but
  306                  * we're assuming that it does because the 4G and 6.5G
  307                  * versions of the drive are broken.
  308                  */
  309         {
  310                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
  311                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  312         },
  313         {
  314                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
  315                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  316         },
  317         {
  318                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
  319                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  320         },
  321         {
  322                 /*
  323                  * Slow when tagged queueing is enabled.  Write performance
  324                  * steadily drops off with more and more concurrent
  325                  * transactions.  Best sequential write performance with
  326                  * tagged queueing turned off and write caching turned on.
  327                  *
  328                  * PR:  kern/10398
  329                  * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
  330                  * Drive:  DCAS-34330 w/ "S65A" firmware.
  331                  *
  332                  * The drive with the problem had the "S65A" firmware
  333                  * revision, and has also been reported (by Stephen J.
  334                  * Roznowski <sjr@home.net>) for a drive with the "S61A"
  335                  * firmware revision.
  336                  *
  337                  * Although no one has reported problems with the 2 gig
  338                  * version of the DCAS drive, the assumption is that it
  339                  * has the same problems as the 4 gig version.  Therefore
  340                  * this quirk entries disables tagged queueing for all
  341                  * DCAS drives.
  342                  */
  343                 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
  344                 /*quirks*/0, /*mintags*/0, /*maxtags*/
  345         },
  346         {
  347                 /* Broken tagged queuing drive */
  348                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
  349                 /*quirks*/0, /*mintags*/0, /*maxtags*/
  350         },
  351         {
  352                 /* Broken tagged queuing drive */ 
  353                 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
  354                 /*quirks*/0, /*mintags*/0, /*maxtags*/
  355         },
  356         {
  357                 /*
  358                  * Broken tagged queuing drive.
  359                  * Submitted by:
  360                  * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
  361                  * in PR kern/9535
  362                  */
  363                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
  364                 /*quirks*/0, /*mintags*/0, /*maxtags*/
  365         },
  366         {
  367                 /*
  368                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
  369                  * 8MB/sec.)
  370                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
  371                  * Best performance with these drives is achieved with
  372                  * tagged queueing turned off, and write caching turned on.
  373                  */
  374                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
  375                 /*quirks*/0, /*mintags*/0, /*maxtags*/
  376         },
  377         {
  378                 /*
  379                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
  380                  * 8MB/sec.)
  381                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
  382                  * Best performance with these drives is achieved with
  383                  * tagged queueing turned off, and write caching turned on.
  384                  */
  385                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
  386                 /*quirks*/0, /*mintags*/0, /*maxtags*/
  387         },
  388         {
  389                 /*
  390                  * Doesn't handle queue full condition correctly,
  391                  * so we need to limit maxtags to what the device
  392                  * can handle instead of determining this automatically.
  393                  */
  394                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
  395                 /*quirks*/0, /*mintags*/2, /*maxtags*/32
  396         },
  397         {
  398                 /* Really only one LUN */
  399                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
  400                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
  401         },
  402         {
  403                 /* I can't believe we need a quirk for DPT volumes. */
  404                 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
  405                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
  406                 /*mintags*/0, /*maxtags*/255
  407         },
  408         {
  409                 /*
  410                  * Many Sony CDROM drives don't like multi-LUN probing.
  411                  */
  412                 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
  413                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
  414         },
  415         {
  416                 /*
  417                  * This drive doesn't like multiple LUN probing.
  418                  * Submitted by:  Parag Patel <parag@cgt.com>
  419                  */
  420                 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
  421                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
  422         },
  423         {
  424                 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
  425                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
  426         },
  427         {
  428                 /*
  429                  * The 8200 doesn't like multi-lun probing, and probably
  430                  * don't like serial number requests either.
  431                  */
  432                 {
  433                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
  434                         "EXB-8200*", "*"
  435                 },
  436                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
  437         },
  438         {
  439                 /*
  440                  * Let's try the same as above, but for a drive that says
  441                  * it's an IPL-6860 but is actually an EXB 8200.
  442                  */
  443                 {
  444                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
  445                         "IPL-6860*", "*"
  446                 },
  447                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
  448         },
  449         {
  450                 /*
  451                  * These Hitachi drives don't like multi-lun probing.
  452                  * The PR submitter has a DK319H, but says that the Linux
  453                  * kernel has a similar work-around for the DK312 and DK314,
  454                  * so all DK31* drives are quirked here.
  455                  * PR:            misc/18793
  456                  * Submitted by:  Paul Haddad <paul@pth.com>
  457                  */
  458                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
  459                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
  460         },
  461         {
  462                 /*
  463                  * The Hitachi CJ series with J8A8 firmware apparantly has
  464                  * problems with tagged commands.
  465                  * PR: 23536
  466                  * Reported by: amagai@nue.org
  467                  */
  468                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
  469                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
  470         },
  471         {
  472                 /*
  473                  * These are the large storage arrays.
  474                  * Submitted by:  William Carrel <william.carrel@infospace.com>
  475                  */
  476                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
  477                 CAM_QUIRK_HILUNS, 2, 1024
  478         },
  479         {
  480                 /*
  481                  * This old revision of the TDC3600 is also SCSI-1, and
  482                  * hangs upon serial number probing.
  483                  */
  484                 {
  485                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
  486                         " TDC 3600", "U07:"
  487                 },
  488                 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/
  489         },
  490         {
  491                 /*
  492                  * Maxtor Personal Storage 3000XT (Firewire)
  493                  * hangs upon serial number probing.
  494                  */
  495                 {
  496                         T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
  497                         "1394 storage", "*"
  498                 },
  499                 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/
  500         },
  501         {
  502                 /*
  503                  * Would repond to all LUNs if asked for.
  504                  */
  505                 {
  506                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
  507                         "CP150", "*"
  508                 },
  509                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
  510         },
  511         {
  512                 /*
  513                  * Would repond to all LUNs if asked for.
  514                  */
  515                 {
  516                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
  517                         "96X2*", "*"
  518                 },
  519                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
  520         },
  521         {
  522                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
  523                 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
  524                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
  525         },
  526         {
  527                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
  528                 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
  529                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
  530         },
  531         {
  532                 /* TeraSolutions special settings for TRC-22 RAID */
  533                 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
  534                   /*quirks*/0, /*mintags*/55, /*maxtags*/255
  535         },
  536         {
  537                 /* Veritas Storage Appliance */
  538                 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
  539                   CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
  540         },
  541         {
  542                 /*
  543                  * Would respond to all LUNs.  Device type and removable
  544                  * flag are jumper-selectable.
  545                  */
  546                 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
  547                   "Tahiti 1", "*"
  548                 },
  549                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
  550         },
  551         {
  552                 /* Default tagged queuing parameters for all devices */
  553                 {
  554                   T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
  555                   /*vendor*/"*", /*product*/"*", /*revision*/"*"
  556                 },
  557                 /*quirks*/0, /*mintags*/2, /*maxtags*/255
  558         },
  559 };
  560 
  561 static const int xpt_quirk_table_size =
  562         sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
  563 
  564 typedef enum {
  565         DM_RET_COPY             = 0x01,
  566         DM_RET_FLAG_MASK        = 0x0f,
  567         DM_RET_NONE             = 0x00,
  568         DM_RET_STOP             = 0x10,
  569         DM_RET_DESCEND          = 0x20,
  570         DM_RET_ERROR            = 0x30,
  571         DM_RET_ACTION_MASK      = 0xf0
  572 } dev_match_ret;
  573 
  574 typedef enum {
  575         XPT_DEPTH_BUS,
  576         XPT_DEPTH_TARGET,
  577         XPT_DEPTH_DEVICE,
  578         XPT_DEPTH_PERIPH
  579 } xpt_traverse_depth;
  580 
  581 struct xpt_traverse_config {
  582         xpt_traverse_depth      depth;
  583         void                    *tr_func;
  584         void                    *tr_arg;
  585 };
  586 
  587 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
  588 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
  589 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
  590 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
  591 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
  592 
  593 /* Transport layer configuration information */
  594 static struct xpt_softc xsoftc;
  595 
  596 /* Queues for our software interrupt handler */
  597 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
  598 static cam_isrq_t cam_bioq;
  599 static cam_isrq_t cam_netq;
  600 
  601 /* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
  602 static SLIST_HEAD(,ccb_hdr) ccb_freeq;
  603 static u_int xpt_max_ccbs;      /*
  604                                  * Maximum size of ccb pool.  Modified as
  605                                  * devices are added/removed or have their
  606                                  * opening counts changed.
  607                                  */
  608 static u_int xpt_ccb_count;     /* Current count of allocated ccbs */
  609 
  610 struct cam_periph *xpt_periph;
  611 
  612 static periph_init_t xpt_periph_init;
  613 
  614 static periph_init_t probe_periph_init;
  615 
  616 static struct periph_driver xpt_driver =
  617 {
  618         xpt_periph_init, "xpt",
  619         TAILQ_HEAD_INITIALIZER(xpt_driver.units)
  620 };
  621 
  622 static struct periph_driver probe_driver =
  623 {
  624         probe_periph_init, "probe",
  625         TAILQ_HEAD_INITIALIZER(probe_driver.units)
  626 };
  627 
  628 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
  629 PERIPHDRIVER_DECLARE(probe, probe_driver);
  630 
  631 #define XPT_CDEV_MAJOR 104
  632 
  633 static d_open_t xptopen;
  634 static d_close_t xptclose;
  635 static d_ioctl_t xptioctl;
  636 
  637 static struct cdevsw xpt_cdevsw = {
  638         .d_open =       xptopen,
  639         .d_close =      xptclose,
  640         .d_ioctl =      xptioctl,
  641         .d_name =       "xpt",
  642         .d_maj =        XPT_CDEV_MAJOR,
  643 };
  644 
  645 static struct intr_config_hook *xpt_config_hook;
  646 
  647 /* Registered busses */
  648 static TAILQ_HEAD(,cam_eb) xpt_busses;
  649 static u_int bus_generation;
  650 
  651 /* Storage for debugging datastructures */
  652 #ifdef  CAMDEBUG
  653 struct cam_path *cam_dpath;
  654 u_int32_t cam_dflags;
  655 u_int32_t cam_debug_delay;
  656 #endif
  657 
  658 /* Pointers to software interrupt handlers */
  659 static void *camnet_ih;
  660 static void *cambio_ih;
  661 
  662 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
  663 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
  664 #endif
  665 
  666 /*
  667  * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
  668  * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
  669  * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
  670  */
  671 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
  672     || defined(CAM_DEBUG_LUN)
  673 #ifdef CAMDEBUG
  674 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
  675     || !defined(CAM_DEBUG_LUN)
  676 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
  677         and CAM_DEBUG_LUN"
  678 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
  679 #else /* !CAMDEBUG */
  680 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
  681 #endif /* CAMDEBUG */
  682 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
  683 
  684 /* Our boot-time initialization hook */
  685 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
  686 
  687 static moduledata_t cam_moduledata = {
  688         "cam",
  689         cam_module_event_handler,
  690         NULL
  691 };
  692 
  693 static void     xpt_init(void *);
  694 
  695 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
  696 MODULE_VERSION(cam, 1);
  697 
  698 
  699 static cam_status       xpt_compile_path(struct cam_path *new_path,
  700                                          struct cam_periph *perph,
  701                                          path_id_t path_id,
  702                                          target_id_t target_id,
  703                                          lun_id_t lun_id);
  704 
  705 static void             xpt_release_path(struct cam_path *path);
  706 
  707 static void             xpt_async_bcast(struct async_list *async_head,
  708                                         u_int32_t async_code,
  709                                         struct cam_path *path,
  710                                         void *async_arg);
  711 static void             xpt_dev_async(u_int32_t async_code,
  712                                       struct cam_eb *bus,
  713                                       struct cam_et *target,
  714                                       struct cam_ed *device,
  715                                       void *async_arg);
  716 static path_id_t xptnextfreepathid(void);
  717 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
  718 static union ccb *xpt_get_ccb(struct cam_ed *device);
  719 static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
  720                                   u_int32_t new_priority);
  721 static void      xpt_run_dev_allocq(struct cam_eb *bus);
  722 static void      xpt_run_dev_sendq(struct cam_eb *bus);
  723 static timeout_t xpt_release_devq_timeout;
  724 static timeout_t xpt_release_simq_timeout;
  725 static void      xpt_release_bus(struct cam_eb *bus);
  726 static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
  727                                          int run_queue);
  728 static struct cam_et*
  729                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
  730 static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
  731 static struct cam_ed*
  732                  xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
  733                                   lun_id_t lun_id);
  734 static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
  735                                     struct cam_ed *device);
  736 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
  737 static struct cam_eb*
  738                  xpt_find_bus(path_id_t path_id);
  739 static struct cam_et*
  740                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
  741 static struct cam_ed*
  742                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
  743 static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
  744 static void      xpt_scan_lun(struct cam_periph *periph,
  745                               struct cam_path *path, cam_flags flags,
  746                               union ccb *ccb);
  747 static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
  748 static xpt_busfunc_t    xptconfigbuscountfunc;
  749 static xpt_busfunc_t    xptconfigfunc;
  750 static void      xpt_config(void *arg);
  751 static xpt_devicefunc_t xptpassannouncefunc;
  752 static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
  753 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
  754 static void      xptpoll(struct cam_sim *sim);
  755 static void      camisr(void *);
  756 #if 0
  757 static void      xptstart(struct cam_periph *periph, union ccb *work_ccb);
  758 static void      xptasync(struct cam_periph *periph,
  759                           u_int32_t code, cam_path *path);
  760 #endif
  761 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
  762                                     u_int num_patterns, struct cam_eb *bus);
  763 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
  764                                        u_int num_patterns,
  765                                        struct cam_ed *device);
  766 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
  767                                        u_int num_patterns,
  768                                        struct cam_periph *periph);
  769 static xpt_busfunc_t    xptedtbusfunc;
  770 static xpt_targetfunc_t xptedttargetfunc;
  771 static xpt_devicefunc_t xptedtdevicefunc;
  772 static xpt_periphfunc_t xptedtperiphfunc;
  773 static xpt_pdrvfunc_t   xptplistpdrvfunc;
  774 static xpt_periphfunc_t xptplistperiphfunc;
  775 static int              xptedtmatch(struct ccb_dev_match *cdm);
  776 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
  777 static int              xptbustraverse(struct cam_eb *start_bus,
  778                                        xpt_busfunc_t *tr_func, void *arg);
  779 static int              xpttargettraverse(struct cam_eb *bus,
  780                                           struct cam_et *start_target,
  781                                           xpt_targetfunc_t *tr_func, void *arg);
  782 static int              xptdevicetraverse(struct cam_et *target,
  783                                           struct cam_ed *start_device,
  784                                           xpt_devicefunc_t *tr_func, void *arg);
  785 static int              xptperiphtraverse(struct cam_ed *device,
  786                                           struct cam_periph *start_periph,
  787                                           xpt_periphfunc_t *tr_func, void *arg);
  788 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
  789                                         xpt_pdrvfunc_t *tr_func, void *arg);
  790 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
  791                                             struct cam_periph *start_periph,
  792                                             xpt_periphfunc_t *tr_func,
  793                                             void *arg);
  794 static xpt_busfunc_t    xptdefbusfunc;
  795 static xpt_targetfunc_t xptdeftargetfunc;
  796 static xpt_devicefunc_t xptdefdevicefunc;
  797 static xpt_periphfunc_t xptdefperiphfunc;
  798 static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
  799 #ifdef notusedyet
  800 static int              xpt_for_all_targets(xpt_targetfunc_t *tr_func,
  801                                             void *arg);
  802 #endif
  803 static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
  804                                             void *arg);
  805 #ifdef notusedyet
  806 static int              xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
  807                                             void *arg);
  808 #endif
  809 static xpt_devicefunc_t xptsetasyncfunc;
  810 static xpt_busfunc_t    xptsetasyncbusfunc;
  811 static cam_status       xptregister(struct cam_periph *periph,
  812                                     void *arg);
  813 static cam_status       proberegister(struct cam_periph *periph,
  814                                       void *arg);
  815 static void      probeschedule(struct cam_periph *probe_periph);
  816 static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
  817 static void      proberequestdefaultnegotiation(struct cam_periph *periph);
  818 static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
  819 static void      probecleanup(struct cam_periph *periph);
  820 static void      xpt_find_quirk(struct cam_ed *device);
  821 #ifdef CAM_NEW_TRAN_CODE
  822 static void      xpt_devise_transport(struct cam_path *path);
  823 #endif /* CAM_NEW_TRAN_CODE */
  824 static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
  825                                            struct cam_ed *device,
  826                                            int async_update);
  827 static void      xpt_toggle_tags(struct cam_path *path);
  828 static void      xpt_start_tags(struct cam_path *path);
  829 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
  830                                             struct cam_ed *dev);
  831 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
  832                                            struct cam_ed *dev);
  833 static __inline int periph_is_queued(struct cam_periph *periph);
  834 static __inline int device_is_alloc_queued(struct cam_ed *device);
  835 static __inline int device_is_send_queued(struct cam_ed *device);
  836 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
  837 
  838 static __inline int
  839 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
  840 {
  841         int retval;
  842 
  843         if (dev->ccbq.devq_openings > 0) {
  844                 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
  845                         cam_ccbq_resize(&dev->ccbq,
  846                                         dev->ccbq.dev_openings
  847                                         + dev->ccbq.dev_active);
  848                         dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
  849                 }
  850                 /*
  851                  * The priority of a device waiting for CCB resources
  852                  * is that of the the highest priority peripheral driver
  853                  * enqueued.
  854                  */
  855                 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
  856                                           &dev->alloc_ccb_entry.pinfo,
  857                                           CAMQ_GET_HEAD(&dev->drvq)->priority); 
  858         } else {
  859                 retval = 0;
  860         }
  861 
  862         return (retval);
  863 }
  864 
  865 static __inline int
  866 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
  867 {
  868         int     retval;
  869 
  870         if (dev->ccbq.dev_openings > 0) {
  871                 /*
  872                  * The priority of a device waiting for controller
  873                  * resources is that of the the highest priority CCB
  874                  * enqueued.
  875                  */
  876                 retval =
  877                     xpt_schedule_dev(&bus->sim->devq->send_queue,
  878                                      &dev->send_ccb_entry.pinfo,
  879                                      CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
  880         } else {
  881                 retval = 0;
  882         }
  883         return (retval);
  884 }
  885 
  886 static __inline int
  887 periph_is_queued(struct cam_periph *periph)
  888 {
  889         return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
  890 }
  891 
  892 static __inline int
  893 device_is_alloc_queued(struct cam_ed *device)
  894 {
  895         return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  896 }
  897 
  898 static __inline int
  899 device_is_send_queued(struct cam_ed *device)
  900 {
  901         return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  902 }
  903 
  904 static __inline int
  905 dev_allocq_is_runnable(struct cam_devq *devq)
  906 {
  907         /*
  908          * Have work to do.
  909          * Have space to do more work.
  910          * Allowed to do work.
  911          */
  912         return ((devq->alloc_queue.qfrozen_cnt == 0)
  913              && (devq->alloc_queue.entries > 0)
  914              && (devq->alloc_openings > 0));
  915 }
  916 
  917 static void
  918 xpt_periph_init()
  919 {
  920         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
  921 }
  922 
  923 static void
  924 probe_periph_init()
  925 {
  926 }
  927 
  928 
  929 static void
  930 xptdone(struct cam_periph *periph, union ccb *done_ccb)
  931 {
  932         /* Caller will release the CCB */
  933         wakeup(&done_ccb->ccb_h.cbfcnp);
  934 }
  935 
  936 static int
  937 xptopen(dev_t dev, int flags, int fmt, struct thread *td)
  938 {
  939         int unit;
  940 
  941         unit = minor(dev) & 0xff;
  942 
  943         /*
  944          * Only allow read-write access.
  945          */
  946         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
  947                 return(EPERM);
  948 
  949         /*
  950          * We don't allow nonblocking access.
  951          */
  952         if ((flags & O_NONBLOCK) != 0) {
  953                 printf("xpt%d: can't do nonblocking access\n", unit);
  954                 return(ENODEV);
  955         }
  956 
  957         /*
  958          * We only have one transport layer right now.  If someone accesses
  959          * us via something other than minor number 1, point out their
  960          * mistake.
  961          */
  962         if (unit != 0) {
  963                 printf("xptopen: got invalid xpt unit %d\n", unit);
  964                 return(ENXIO);
  965         }
  966 
  967         /* Mark ourselves open */
  968         xsoftc.flags |= XPT_FLAG_OPEN;
  969         
  970         return(0);
  971 }
  972 
  973 static int
  974 xptclose(dev_t dev, int flag, int fmt, struct thread *td)
  975 {
  976         int unit;
  977 
  978         unit = minor(dev) & 0xff;
  979 
  980         /*
  981          * We only have one transport layer right now.  If someone accesses
  982          * us via something other than minor number 1, point out their
  983          * mistake.
  984          */
  985         if (unit != 0) {
  986                 printf("xptclose: got invalid xpt unit %d\n", unit);
  987                 return(ENXIO);
  988         }
  989 
  990         /* Mark ourselves closed */
  991         xsoftc.flags &= ~XPT_FLAG_OPEN;
  992 
  993         return(0);
  994 }
  995 
  996 static int
  997 xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
  998 {
  999         int unit, error;
 1000 
 1001         error = 0;
 1002         unit = minor(dev) & 0xff;
 1003 
 1004         /*
 1005          * We only have one transport layer right now.  If someone accesses
 1006          * us via something other than minor number 1, point out their
 1007          * mistake.
 1008          */
 1009         if (unit != 0) {
 1010                 printf("xptioctl: got invalid xpt unit %d\n", unit);
 1011                 return(ENXIO);
 1012         }
 1013 
 1014         switch(cmd) {
 1015         /*
 1016          * For the transport layer CAMIOCOMMAND ioctl, we really only want
 1017          * to accept CCB types that don't quite make sense to send through a
 1018          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
 1019          * in the CAM spec.
 1020          */
 1021         case CAMIOCOMMAND: {
 1022                 union ccb *ccb;
 1023                 union ccb *inccb;
 1024 
 1025                 inccb = (union ccb *)addr;
 1026 
 1027                 switch(inccb->ccb_h.func_code) {
 1028                 case XPT_SCAN_BUS:
 1029                 case XPT_RESET_BUS:
 1030                         if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
 1031                          || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
 1032                                 error = EINVAL;
 1033                                 break;
 1034                         }
 1035                         /* FALLTHROUGH */
 1036                 case XPT_PATH_INQ:
 1037                 case XPT_ENG_INQ:
 1038                 case XPT_SCAN_LUN:
 1039 
 1040                         ccb = xpt_alloc_ccb();
 1041 
 1042                         /*
 1043                          * Create a path using the bus, target, and lun the
 1044                          * user passed in.
 1045                          */
 1046                         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
 1047                                             inccb->ccb_h.path_id,
 1048                                             inccb->ccb_h.target_id,
 1049                                             inccb->ccb_h.target_lun) !=
 1050                                             CAM_REQ_CMP){
 1051                                 error = EINVAL;
 1052                                 xpt_free_ccb(ccb);
 1053                                 break;
 1054                         }
 1055                         /* Ensure all of our fields are correct */
 1056                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
 1057                                       inccb->ccb_h.pinfo.priority);
 1058                         xpt_merge_ccb(ccb, inccb);
 1059                         ccb->ccb_h.cbfcnp = xptdone;
 1060                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
 1061                         bcopy(ccb, inccb, sizeof(union ccb));
 1062                         xpt_free_path(ccb->ccb_h.path);
 1063                         xpt_free_ccb(ccb);
 1064                         break;
 1065 
 1066                 case XPT_DEBUG: {
 1067                         union ccb ccb;
 1068 
 1069                         /*
 1070                          * This is an immediate CCB, so it's okay to
 1071                          * allocate it on the stack.
 1072                          */
 1073 
 1074                         /*
 1075                          * Create a path using the bus, target, and lun the
 1076                          * user passed in.
 1077                          */
 1078                         if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
 1079                                             inccb->ccb_h.path_id,
 1080                                             inccb->ccb_h.target_id,
 1081                                             inccb->ccb_h.target_lun) !=
 1082                                             CAM_REQ_CMP){
 1083                                 error = EINVAL;
 1084                                 break;
 1085                         }
 1086                         /* Ensure all of our fields are correct */
 1087                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
 1088                                       inccb->ccb_h.pinfo.priority);
 1089                         xpt_merge_ccb(&ccb, inccb);
 1090                         ccb.ccb_h.cbfcnp = xptdone;
 1091                         xpt_action(&ccb);
 1092                         bcopy(&ccb, inccb, sizeof(union ccb));
 1093                         xpt_free_path(ccb.ccb_h.path);
 1094                         break;
 1095 
 1096                 }
 1097                 case XPT_DEV_MATCH: {
 1098                         struct cam_periph_map_info mapinfo;
 1099                         struct cam_path *old_path;
 1100 
 1101                         /*
 1102                          * We can't deal with physical addresses for this
 1103                          * type of transaction.
 1104                          */
 1105                         if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
 1106                                 error = EINVAL;
 1107                                 break;
 1108                         }
 1109 
 1110                         /*
 1111                          * Save this in case the caller had it set to
 1112                          * something in particular.
 1113                          */
 1114                         old_path = inccb->ccb_h.path;
 1115 
 1116                         /*
 1117                          * We really don't need a path for the matching
 1118                          * code.  The path is needed because of the
 1119                          * debugging statements in xpt_action().  They
 1120                          * assume that the CCB has a valid path.
 1121                          */
 1122                         inccb->ccb_h.path = xpt_periph->path;
 1123 
 1124                         bzero(&mapinfo, sizeof(mapinfo));
 1125 
 1126                         /*
 1127                          * Map the pattern and match buffers into kernel
 1128                          * virtual address space.
 1129                          */
 1130                         error = cam_periph_mapmem(inccb, &mapinfo);
 1131 
 1132                         if (error) {
 1133                                 inccb->ccb_h.path = old_path;
 1134                                 break;
 1135                         }
 1136 
 1137                         /*
 1138                          * This is an immediate CCB, we can send it on directly.
 1139                          */
 1140                         xpt_action(inccb);
 1141 
 1142                         /*
 1143                          * Map the buffers back into user space.
 1144                          */
 1145                         cam_periph_unmapmem(inccb, &mapinfo);
 1146 
 1147                         inccb->ccb_h.path = old_path;
 1148 
 1149                         error = 0;
 1150                         break;
 1151                 }
 1152                 default:
 1153                         error = ENOTSUP;
 1154                         break;
 1155                 }
 1156                 break;
 1157         }
 1158         /*
 1159          * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
 1160          * with the periphal driver name and unit name filled in.  The other
 1161          * fields don't really matter as input.  The passthrough driver name
 1162          * ("pass"), and unit number are passed back in the ccb.  The current
 1163          * device generation number, and the index into the device peripheral
 1164          * driver list, and the status are also passed back.  Note that
 1165          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
 1166          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
 1167          * (or rather should be) impossible for the device peripheral driver
 1168          * list to change since we look at the whole thing in one pass, and
 1169          * we do it with splcam protection.
 1170          * 
 1171          */
 1172         case CAMGETPASSTHRU: {
 1173                 union ccb *ccb;
 1174                 struct cam_periph *periph;
 1175                 struct periph_driver **p_drv;
 1176                 char   *name;
 1177                 u_int unit;
 1178                 u_int cur_generation;
 1179                 int base_periph_found;
 1180                 int splbreaknum;
 1181                 int s;
 1182 
 1183                 ccb = (union ccb *)addr;
 1184                 unit = ccb->cgdl.unit_number;
 1185                 name = ccb->cgdl.periph_name;
 1186                 /*
 1187                  * Every 100 devices, we want to drop our spl protection to
 1188                  * give the software interrupt handler a chance to run.
 1189                  * Most systems won't run into this check, but this should
 1190                  * avoid starvation in the software interrupt handler in
 1191                  * large systems.
 1192                  */
 1193                 splbreaknum = 100;
 1194 
 1195                 ccb = (union ccb *)addr;
 1196 
 1197                 base_periph_found = 0;
 1198 
 1199                 /*
 1200                  * Sanity check -- make sure we don't get a null peripheral
 1201                  * driver name.
 1202                  */
 1203                 if (*ccb->cgdl.periph_name == '\0') {
 1204                         error = EINVAL;
 1205                         break;
 1206                 }
 1207 
 1208                 /* Keep the list from changing while we traverse it */
 1209                 s = splcam();
 1210 ptstartover:
 1211                 cur_generation = xsoftc.generation;
 1212 
 1213                 /* first find our driver in the list of drivers */
 1214                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
 1215                         if (strcmp((*p_drv)->driver_name, name) == 0)
 1216                                 break;
 1217 
 1218                 if (*p_drv == NULL) {
 1219                         splx(s);
 1220                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1221                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1222                         *ccb->cgdl.periph_name = '\0';
 1223                         ccb->cgdl.unit_number = 0;
 1224                         error = ENOENT;
 1225                         break;
 1226                 }       
 1227 
 1228                 /*
 1229                  * Run through every peripheral instance of this driver
 1230                  * and check to see whether it matches the unit passed
 1231                  * in by the user.  If it does, get out of the loops and
 1232                  * find the passthrough driver associated with that
 1233                  * peripheral driver.
 1234                  */
 1235                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
 1236                      periph = TAILQ_NEXT(periph, unit_links)) {
 1237 
 1238                         if (periph->unit_number == unit) {
 1239                                 break;
 1240                         } else if (--splbreaknum == 0) {
 1241                                 splx(s);
 1242                                 s = splcam();
 1243                                 splbreaknum = 100;
 1244                                 if (cur_generation != xsoftc.generation)
 1245                                        goto ptstartover;
 1246                         }
 1247                 }
 1248                 /*
 1249                  * If we found the peripheral driver that the user passed
 1250                  * in, go through all of the peripheral drivers for that
 1251                  * particular device and look for a passthrough driver.
 1252                  */
 1253                 if (periph != NULL) {
 1254                         struct cam_ed *device;
 1255                         int i;
 1256 
 1257                         base_periph_found = 1;
 1258                         device = periph->path->device;
 1259                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
 1260                              periph != NULL;
 1261                              periph = SLIST_NEXT(periph, periph_links), i++) {
 1262                                 /*
 1263                                  * Check to see whether we have a
 1264                                  * passthrough device or not. 
 1265                                  */
 1266                                 if (strcmp(periph->periph_name, "pass") == 0) {
 1267                                         /*
 1268                                          * Fill in the getdevlist fields.
 1269                                          */
 1270                                         strcpy(ccb->cgdl.periph_name,
 1271                                                periph->periph_name);
 1272                                         ccb->cgdl.unit_number =
 1273                                                 periph->unit_number;
 1274                                         if (SLIST_NEXT(periph, periph_links))
 1275                                                 ccb->cgdl.status =
 1276                                                         CAM_GDEVLIST_MORE_DEVS;
 1277                                         else
 1278                                                 ccb->cgdl.status =
 1279                                                        CAM_GDEVLIST_LAST_DEVICE;
 1280                                         ccb->cgdl.generation =
 1281                                                 device->generation;
 1282                                         ccb->cgdl.index = i;
 1283                                         /*
 1284                                          * Fill in some CCB header fields
 1285                                          * that the user may want.
 1286                                          */
 1287                                         ccb->ccb_h.path_id =
 1288                                                 periph->path->bus->path_id;
 1289                                         ccb->ccb_h.target_id =
 1290                                                 periph->path->target->target_id;
 1291                                         ccb->ccb_h.target_lun =
 1292                                                 periph->path->device->lun_id;
 1293                                         ccb->ccb_h.status = CAM_REQ_CMP;
 1294                                         break;
 1295                                 }
 1296                         }
 1297                 }
 1298 
 1299                 /*
 1300                  * If the periph is null here, one of two things has
 1301                  * happened.  The first possibility is that we couldn't
 1302                  * find the unit number of the particular peripheral driver
 1303                  * that the user is asking about.  e.g. the user asks for
 1304                  * the passthrough driver for "da11".  We find the list of
 1305                  * "da" peripherals all right, but there is no unit 11.
 1306                  * The other possibility is that we went through the list
 1307                  * of peripheral drivers attached to the device structure,
 1308                  * but didn't find one with the name "pass".  Either way,
 1309                  * we return ENOENT, since we couldn't find something.
 1310                  */
 1311                 if (periph == NULL) {
 1312                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1313                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1314                         *ccb->cgdl.periph_name = '\0';
 1315                         ccb->cgdl.unit_number = 0;
 1316                         error = ENOENT;
 1317                         /*
 1318                          * It is unfortunate that this is even necessary,
 1319                          * but there are many, many clueless users out there.
 1320                          * If this is true, the user is looking for the
 1321                          * passthrough driver, but doesn't have one in his
 1322                          * kernel.
 1323                          */
 1324                         if (base_periph_found == 1) {
 1325                                 printf("xptioctl: pass driver is not in the "
 1326                                        "kernel\n");
 1327                                 printf("xptioctl: put \"device pass0\" in "
 1328                                        "your kernel config file\n");
 1329                         }
 1330                 }
 1331                 splx(s);
 1332                 break;
 1333                 }
 1334         default:
 1335                 error = ENOTTY;
 1336                 break;
 1337         }
 1338 
 1339         return(error);
 1340 }
 1341 
 1342 static int
 1343 cam_module_event_handler(module_t mod, int what, void *arg)
 1344 {
 1345         if (what == MOD_LOAD) {
 1346                 xpt_init(NULL);
 1347         } else if (what == MOD_UNLOAD) {
 1348                 return EBUSY;
 1349         }
 1350 
 1351         return 0;
 1352 }
 1353 
 1354 /* Functions accessed by the peripheral drivers */
 1355 static void
 1356 xpt_init(dummy)
 1357         void *dummy;
 1358 {
 1359         struct cam_sim *xpt_sim;
 1360         struct cam_path *path;
 1361         struct cam_devq *devq;
 1362         cam_status status;
 1363 
 1364         TAILQ_INIT(&xpt_busses);
 1365         TAILQ_INIT(&cam_bioq);
 1366         TAILQ_INIT(&cam_netq);
 1367         SLIST_INIT(&ccb_freeq);
 1368         STAILQ_INIT(&highpowerq);
 1369 
 1370         /*
 1371          * The xpt layer is, itself, the equivelent of a SIM.
 1372          * Allow 16 ccbs in the ccb pool for it.  This should
 1373          * give decent parallelism when we probe busses and
 1374          * perform other XPT functions.
 1375          */
 1376         devq = cam_simq_alloc(16);
 1377         xpt_sim = cam_sim_alloc(xptaction,
 1378                                 xptpoll,
 1379                                 "xpt",
 1380                                 /*softc*/NULL,
 1381                                 /*unit*/0,
 1382                                 /*max_dev_transactions*/0,
 1383                                 /*max_tagged_dev_transactions*/0,
 1384                                 devq);
 1385         xpt_max_ccbs = 16;
 1386                                 
 1387         xpt_bus_register(xpt_sim, /*bus #*/0);
 1388 
 1389         /*
 1390          * Looking at the XPT from the SIM layer, the XPT is
 1391          * the equivelent of a peripheral driver.  Allocate
 1392          * a peripheral driver entry for us.
 1393          */
 1394         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
 1395                                       CAM_TARGET_WILDCARD,
 1396                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
 1397                 printf("xpt_init: xpt_create_path failed with status %#x,"
 1398                        " failing attach\n", status);
 1399                 return;
 1400         }
 1401 
 1402         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
 1403                          path, NULL, 0, NULL);
 1404         xpt_free_path(path);
 1405 
 1406         xpt_sim->softc = xpt_periph;
 1407 
 1408         /*
 1409          * Register a callback for when interrupts are enabled.
 1410          */
 1411         xpt_config_hook =
 1412             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
 1413                                               M_TEMP, M_NOWAIT | M_ZERO);
 1414         if (xpt_config_hook == NULL) {
 1415                 printf("xpt_init: Cannot malloc config hook "
 1416                        "- failing attach\n");
 1417                 return;
 1418         }
 1419 
 1420         xpt_config_hook->ich_func = xpt_config;
 1421         if (config_intrhook_establish(xpt_config_hook) != 0) {
 1422                 free (xpt_config_hook, M_TEMP);
 1423                 printf("xpt_init: config_intrhook_establish failed "
 1424                        "- failing attach\n");
 1425         }
 1426 
 1427         /* Install our software interrupt handlers */
 1428         swi_add(NULL, "camnet", camisr, &cam_netq, SWI_CAMNET, 0, &camnet_ih);
 1429         swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih);
 1430 }
 1431 
 1432 static cam_status
 1433 xptregister(struct cam_periph *periph, void *arg)
 1434 {
 1435         if (periph == NULL) {
 1436                 printf("xptregister: periph was NULL!!\n");
 1437                 return(CAM_REQ_CMP_ERR);
 1438         }
 1439 
 1440         periph->softc = NULL;
 1441 
 1442         xpt_periph = periph;
 1443 
 1444         return(CAM_REQ_CMP);
 1445 }
 1446 
 1447 int32_t
 1448 xpt_add_periph(struct cam_periph *periph)
 1449 {
 1450         struct cam_ed *device;
 1451         int32_t  status;
 1452         struct periph_list *periph_head;
 1453 
 1454         device = periph->path->device;
 1455 
 1456         periph_head = &device->periphs;
 1457 
 1458         status = CAM_REQ_CMP;
 1459 
 1460         if (device != NULL) {
 1461                 int s;
 1462 
 1463                 /*
 1464                  * Make room for this peripheral
 1465                  * so it will fit in the queue
 1466                  * when it's scheduled to run
 1467                  */
 1468                 s = splsoftcam();
 1469                 status = camq_resize(&device->drvq,
 1470                                      device->drvq.array_size + 1);
 1471 
 1472                 device->generation++;
 1473 
 1474                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
 1475 
 1476                 splx(s);
 1477         }
 1478 
 1479         xsoftc.generation++;
 1480 
 1481         return (status);
 1482 }
 1483 
 1484 void
 1485 xpt_remove_periph(struct cam_periph *periph)
 1486 {
 1487         struct cam_ed *device;
 1488 
 1489         device = periph->path->device;
 1490 
 1491         if (device != NULL) {
 1492                 int s;
 1493                 struct periph_list *periph_head;
 1494 
 1495                 periph_head = &device->periphs;
 1496                 
 1497                 /* Release the slot for this peripheral */
 1498                 s = splsoftcam();
 1499                 camq_resize(&device->drvq, device->drvq.array_size - 1);
 1500 
 1501                 device->generation++;
 1502 
 1503                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
 1504 
 1505                 splx(s);
 1506         }
 1507 
 1508         xsoftc.generation++;
 1509 
 1510 }
 1511 
 1512 #ifdef CAM_NEW_TRAN_CODE
 1513 
 1514 void
 1515 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1516 {
 1517         struct  ccb_pathinq cpi;
 1518         struct  ccb_trans_settings cts;
 1519         struct  cam_path *path;
 1520         u_int   speed;
 1521         u_int   freq;
 1522         u_int   mb;
 1523         int     s;
 1524 
 1525         path = periph->path;
 1526         /*
 1527          * To ensure that this is printed in one piece,
 1528          * mask out CAM interrupts.
 1529          */
 1530         s = splsoftcam();
 1531         printf("%s%d at %s%d bus %d target %d lun %d\n",
 1532                periph->periph_name, periph->unit_number,
 1533                path->bus->sim->sim_name,
 1534                path->bus->sim->unit_number,
 1535                path->bus->sim->bus_id,
 1536                path->target->target_id,
 1537                path->device->lun_id);
 1538         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1539         scsi_print_inquiry(&path->device->inq_data);
 1540         if (bootverbose && path->device->serial_num_len > 0) {
 1541                 /* Don't wrap the screen  - print only the first 60 chars */
 1542                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1543                        periph->unit_number, path->device->serial_num);
 1544         }
 1545         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 1546         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 1547         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 1548         xpt_action((union ccb*)&cts);
 1549 
 1550         /* Ask the SIM for its base transfer speed */
 1551         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 1552         cpi.ccb_h.func_code = XPT_PATH_INQ;
 1553         xpt_action((union ccb *)&cpi);
 1554 
 1555         speed = cpi.base_transfer_speed;
 1556         freq = 0;
 1557         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1558                 struct  ccb_trans_settings_spi *spi;
 1559 
 1560                 spi = &cts.xport_specific.spi;
 1561                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
 1562                   && spi->sync_offset != 0) {
 1563                         freq = scsi_calc_syncsrate(spi->sync_period);
 1564                         speed = freq;
 1565                 }
 1566 
 1567                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
 1568                         speed *= (0x01 << spi->bus_width);
 1569         }
 1570 
 1571         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1572                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
 1573                 if (fc->valid & CTS_FC_VALID_SPEED) {
 1574                         speed = fc->bitrate;
 1575                 }
 1576         }
 1577 
 1578         mb = speed / 1000;
 1579         if (mb > 0)
 1580                 printf("%s%d: %d.%03dMB/s transfers",
 1581                        periph->periph_name, periph->unit_number,
 1582                        mb, speed % 1000);
 1583         else
 1584                 printf("%s%d: %dKB/s transfers", periph->periph_name,
 1585                        periph->unit_number, speed);
 1586         /* Report additional information about SPI connections */
 1587         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1588                 struct  ccb_trans_settings_spi *spi;
 1589 
 1590                 spi = &cts.xport_specific.spi;
 1591                 if (freq != 0) {
 1592                         printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
 1593                                freq % 1000,
 1594                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
 1595                              ? " DT" : "",
 1596                                spi->sync_offset);
 1597                 }
 1598                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
 1599                  && spi->bus_width > 0) {
 1600                         if (freq != 0) {
 1601                                 printf(", ");
 1602                         } else {
 1603                                 printf(" (");
 1604                         }
 1605                         printf("%dbit)", 8 * (0x01 << spi->bus_width));
 1606                 } else if (freq != 0) {
 1607                         printf(")");
 1608                 }
 1609         }
 1610         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1611                 struct  ccb_trans_settings_fc *fc;
 1612 
 1613                 fc = &cts.xport_specific.fc;
 1614                 if (fc->valid & CTS_FC_VALID_WWNN)
 1615                         printf(" WWNN 0x%llx", (long long) fc->wwnn);
 1616                 if (fc->valid & CTS_FC_VALID_WWPN)
 1617                         printf(" WWPN 0x%llx", (long long) fc->wwpn);
 1618                 if (fc->valid & CTS_FC_VALID_PORT)
 1619                         printf(" PortID 0x%x", fc->port);
 1620         }
 1621 
 1622         if (path->device->inq_flags & SID_CmdQue
 1623          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1624                 printf("\n%s%d: Tagged Queueing Enabled",
 1625                        periph->periph_name, periph->unit_number);
 1626         }
 1627         printf("\n");
 1628 
 1629         /*
 1630          * We only want to print the caller's announce string if they've
 1631          * passed one in..
 1632          */
 1633         if (announce_string != NULL)
 1634                 printf("%s%d: %s\n", periph->periph_name,
 1635                        periph->unit_number, announce_string);
 1636         splx(s);
 1637 }
 1638 #else /* CAM_NEW_TRAN_CODE */
 1639 void
 1640 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1641 {
 1642         int s;
 1643         u_int mb;
 1644         struct cam_path *path;
 1645         struct ccb_trans_settings cts;
 1646 
 1647         path = periph->path;
 1648         /*
 1649          * To ensure that this is printed in one piece,
 1650          * mask out CAM interrupts.
 1651          */
 1652         s = splsoftcam();
 1653         printf("%s%d at %s%d bus %d target %d lun %d\n",
 1654                periph->periph_name, periph->unit_number,
 1655                path->bus->sim->sim_name,
 1656                path->bus->sim->unit_number,
 1657                path->bus->sim->bus_id,
 1658                path->target->target_id,
 1659                path->device->lun_id);
 1660         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1661         scsi_print_inquiry(&path->device->inq_data);
 1662         if ((bootverbose)
 1663          && (path->device->serial_num_len > 0)) {
 1664                 /* Don't wrap the screen  - print only the first 60 chars */
 1665                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1666                        periph->unit_number, path->device->serial_num);
 1667         }
 1668         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 1669         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 1670         cts.flags = CCB_TRANS_CURRENT_SETTINGS;
 1671         xpt_action((union ccb*)&cts);
 1672         if (cts.ccb_h.status == CAM_REQ_CMP) {
 1673                 u_int speed;
 1674                 u_int freq;
 1675 
 1676                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1677                   && cts.sync_offset != 0) {
 1678                         freq = scsi_calc_syncsrate(cts.sync_period);
 1679                         speed = freq;
 1680                 } else {
 1681                         struct ccb_pathinq cpi;
 1682 
 1683                         /* Ask the SIM for its base transfer speed */
 1684                         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 1685                         cpi.ccb_h.func_code = XPT_PATH_INQ;
 1686                         xpt_action((union ccb *)&cpi);
 1687 
 1688                         speed = cpi.base_transfer_speed;
 1689                         freq = 0;
 1690                 }
 1691                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
 1692                         speed *= (0x01 << cts.bus_width);
 1693                 mb = speed / 1000;
 1694                 if (mb > 0)
 1695                         printf("%s%d: %d.%03dMB/s transfers",
 1696                                periph->periph_name, periph->unit_number,
 1697                                mb, speed % 1000);
 1698                 else
 1699                         printf("%s%d: %dKB/s transfers", periph->periph_name,
 1700                                periph->unit_number, speed);
 1701                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1702                  && cts.sync_offset != 0) {
 1703                         printf(" (%d.%03dMHz, offset %d", freq / 1000,
 1704                                freq % 1000, cts.sync_offset);
 1705                 }
 1706                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
 1707                  && cts.bus_width > 0) {
 1708                         if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1709                          && cts.sync_offset != 0) {
 1710                                 printf(", ");
 1711                         } else {
 1712                                 printf(" (");
 1713                         }
 1714                         printf("%dbit)", 8 * (0x01 << cts.bus_width));
 1715                 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1716                         && cts.sync_offset != 0) {
 1717                         printf(")");
 1718                 }
 1719 
 1720                 if (path->device->inq_flags & SID_CmdQue
 1721                  || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1722                         printf(", Tagged Queueing Enabled");
 1723                 }
 1724 
 1725                 printf("\n");
 1726         } else if (path->device->inq_flags & SID_CmdQue
 1727                 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1728                 printf("%s%d: Tagged Queueing Enabled\n",
 1729                        periph->periph_name, periph->unit_number);
 1730         }
 1731 
 1732         /*
 1733          * We only want to print the caller's announce string if they've
 1734          * passed one in..
 1735          */
 1736         if (announce_string != NULL)
 1737                 printf("%s%d: %s\n", periph->periph_name,
 1738                        periph->unit_number, announce_string);
 1739         splx(s);
 1740 }
 1741 
 1742 #endif /* CAM_NEW_TRAN_CODE */
 1743 
 1744 static dev_match_ret
 1745 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1746             struct cam_eb *bus)
 1747 {
 1748         dev_match_ret retval;
 1749         int i;
 1750 
 1751         retval = DM_RET_NONE;
 1752 
 1753         /*
 1754          * If we aren't given something to match against, that's an error.
 1755          */
 1756         if (bus == NULL)
 1757                 return(DM_RET_ERROR);
 1758 
 1759         /*
 1760          * If there are no match entries, then this bus matches no
 1761          * matter what.
 1762          */
 1763         if ((patterns == NULL) || (num_patterns == 0))
 1764                 return(DM_RET_DESCEND | DM_RET_COPY);
 1765 
 1766         for (i = 0; i < num_patterns; i++) {
 1767                 struct bus_match_pattern *cur_pattern;
 1768 
 1769                 /*
 1770                  * If the pattern in question isn't for a bus node, we
 1771                  * aren't interested.  However, we do indicate to the
 1772                  * calling routine that we should continue descending the
 1773                  * tree, since the user wants to match against lower-level
 1774                  * EDT elements.
 1775                  */
 1776                 if (patterns[i].type != DEV_MATCH_BUS) {
 1777                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1778                                 retval |= DM_RET_DESCEND;
 1779                         continue;
 1780                 }
 1781 
 1782                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1783 
 1784                 /*
 1785                  * If they want to match any bus node, we give them any
 1786                  * device node.
 1787                  */
 1788                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1789                         /* set the copy flag */
 1790                         retval |= DM_RET_COPY;
 1791 
 1792                         /*
 1793                          * If we've already decided on an action, go ahead
 1794                          * and return.
 1795                          */
 1796                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1797                                 return(retval);
 1798                 }
 1799 
 1800                 /*
 1801                  * Not sure why someone would do this...
 1802                  */
 1803                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1804                         continue;
 1805 
 1806                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1807                  && (cur_pattern->path_id != bus->path_id))
 1808                         continue;
 1809 
 1810                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1811                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1812                         continue;
 1813 
 1814                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1815                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1816                         continue;
 1817 
 1818                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1819                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1820                              DEV_IDLEN) != 0))
 1821                         continue;
 1822 
 1823                 /*
 1824                  * If we get to this point, the user definitely wants 
 1825                  * information on this bus.  So tell the caller to copy the
 1826                  * data out.
 1827                  */
 1828                 retval |= DM_RET_COPY;
 1829 
 1830                 /*
 1831                  * If the return action has been set to descend, then we
 1832                  * know that we've already seen a non-bus matching
 1833                  * expression, therefore we need to further descend the tree.
 1834                  * This won't change by continuing around the loop, so we
 1835                  * go ahead and return.  If we haven't seen a non-bus
 1836                  * matching expression, we keep going around the loop until
 1837                  * we exhaust the matching expressions.  We'll set the stop
 1838                  * flag once we fall out of the loop.
 1839                  */
 1840                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1841                         return(retval);
 1842         }
 1843 
 1844         /*
 1845          * If the return action hasn't been set to descend yet, that means
 1846          * we haven't seen anything other than bus matching patterns.  So
 1847          * tell the caller to stop descending the tree -- the user doesn't
 1848          * want to match against lower level tree elements.
 1849          */
 1850         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1851                 retval |= DM_RET_STOP;
 1852 
 1853         return(retval);
 1854 }
 1855 
 1856 static dev_match_ret
 1857 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1858                struct cam_ed *device)
 1859 {
 1860         dev_match_ret retval;
 1861         int i;
 1862 
 1863         retval = DM_RET_NONE;
 1864 
 1865         /*
 1866          * If we aren't given something to match against, that's an error.
 1867          */
 1868         if (device == NULL)
 1869                 return(DM_RET_ERROR);
 1870 
 1871         /*
 1872          * If there are no match entries, then this device matches no
 1873          * matter what.
 1874          */
 1875         if ((patterns == NULL) || (patterns == 0))
 1876                 return(DM_RET_DESCEND | DM_RET_COPY);
 1877 
 1878         for (i = 0; i < num_patterns; i++) {
 1879                 struct device_match_pattern *cur_pattern;
 1880 
 1881                 /*
 1882                  * If the pattern in question isn't for a device node, we
 1883                  * aren't interested.
 1884                  */
 1885                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1886                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1887                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1888                                 retval |= DM_RET_DESCEND;
 1889                         continue;
 1890                 }
 1891 
 1892                 cur_pattern = &patterns[i].pattern.device_pattern;
 1893 
 1894                 /*
 1895                  * If they want to match any device node, we give them any
 1896                  * device node.
 1897                  */
 1898                 if (cur_pattern->flags == DEV_MATCH_ANY) {
 1899                         /* set the copy flag */
 1900                         retval |= DM_RET_COPY;
 1901 
 1902                         
 1903                         /*
 1904                          * If we've already decided on an action, go ahead
 1905                          * and return.
 1906                          */
 1907                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1908                                 return(retval);
 1909                 }
 1910 
 1911                 /*
 1912                  * Not sure why someone would do this...
 1913                  */
 1914                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1915                         continue;
 1916 
 1917                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1918                  && (cur_pattern->path_id != device->target->bus->path_id))
 1919                         continue;
 1920 
 1921                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1922                  && (cur_pattern->target_id != device->target->target_id))
 1923                         continue;
 1924 
 1925                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1926                  && (cur_pattern->target_lun != device->lun_id))
 1927                         continue;
 1928 
 1929                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1930                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1931                                     (caddr_t)&cur_pattern->inq_pat,
 1932                                     1, sizeof(cur_pattern->inq_pat),
 1933                                     scsi_static_inquiry_match) == NULL))
 1934                         continue;
 1935 
 1936                 /*
 1937                  * If we get to this point, the user definitely wants 
 1938                  * information on this device.  So tell the caller to copy
 1939                  * the data out.
 1940                  */
 1941                 retval |= DM_RET_COPY;
 1942 
 1943                 /*
 1944                  * If the return action has been set to descend, then we
 1945                  * know that we've already seen a peripheral matching
 1946                  * expression, therefore we need to further descend the tree.
 1947                  * This won't change by continuing around the loop, so we
 1948                  * go ahead and return.  If we haven't seen a peripheral
 1949                  * matching expression, we keep going around the loop until
 1950                  * we exhaust the matching expressions.  We'll set the stop
 1951                  * flag once we fall out of the loop.
 1952                  */
 1953                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1954                         return(retval);
 1955         }
 1956 
 1957         /*
 1958          * If the return action hasn't been set to descend yet, that means
 1959          * we haven't seen any peripheral matching patterns.  So tell the
 1960          * caller to stop descending the tree -- the user doesn't want to
 1961          * match against lower level tree elements.
 1962          */
 1963         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1964                 retval |= DM_RET_STOP;
 1965 
 1966         return(retval);
 1967 }
 1968 
 1969 /*
 1970  * Match a single peripheral against any number of match patterns.
 1971  */
 1972 static dev_match_ret
 1973 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1974                struct cam_periph *periph)
 1975 {
 1976         dev_match_ret retval;
 1977         int i;
 1978 
 1979         /*
 1980          * If we aren't given something to match against, that's an error.
 1981          */
 1982         if (periph == NULL)
 1983                 return(DM_RET_ERROR);
 1984 
 1985         /*
 1986          * If there are no match entries, then this peripheral matches no
 1987          * matter what.
 1988          */
 1989         if ((patterns == NULL) || (num_patterns == 0))
 1990                 return(DM_RET_STOP | DM_RET_COPY);
 1991 
 1992         /*
 1993          * There aren't any nodes below a peripheral node, so there's no
 1994          * reason to descend the tree any further.
 1995          */
 1996         retval = DM_RET_STOP;
 1997 
 1998         for (i = 0; i < num_patterns; i++) {
 1999                 struct periph_match_pattern *cur_pattern;
 2000 
 2001                 /*
 2002                  * If the pattern in question isn't for a peripheral, we
 2003                  * aren't interested.
 2004                  */
 2005                 if (patterns[i].type != DEV_MATCH_PERIPH)
 2006                         continue;
 2007 
 2008                 cur_pattern = &patterns[i].pattern.periph_pattern;
 2009 
 2010                 /*
 2011                  * If they want to match on anything, then we will do so.
 2012                  */
 2013                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 2014                         /* set the copy flag */
 2015                         retval |= DM_RET_COPY;
 2016 
 2017                         /*
 2018                          * We've already set the return action to stop,
 2019                          * since there are no nodes below peripherals in
 2020                          * the tree.
 2021                          */
 2022                         return(retval);
 2023                 }
 2024 
 2025                 /*
 2026                  * Not sure why someone would do this...
 2027                  */
 2028                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 2029                         continue;
 2030 
 2031                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 2032                  && (cur_pattern->path_id != periph->path->bus->path_id))
 2033                         continue;
 2034 
 2035                 /*
 2036                  * For the target and lun id's, we have to make sure the
 2037                  * target and lun pointers aren't NULL.  The xpt peripheral
 2038                  * has a wildcard target and device.
 2039                  */
 2040                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 2041                  && ((periph->path->target == NULL)
 2042                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 2043                         continue;
 2044 
 2045                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 2046                  && ((periph->path->device == NULL)
 2047                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 2048                         continue;
 2049 
 2050                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 2051                  && (cur_pattern->unit_number != periph->unit_number))
 2052                         continue;
 2053 
 2054                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 2055                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 2056                              DEV_IDLEN) != 0))
 2057                         continue;
 2058 
 2059                 /*
 2060                  * If we get to this point, the user definitely wants 
 2061                  * information on this peripheral.  So tell the caller to
 2062                  * copy the data out.
 2063                  */
 2064                 retval |= DM_RET_COPY;
 2065 
 2066                 /*
 2067                  * The return action has already been set to stop, since
 2068                  * peripherals don't have any nodes below them in the EDT.
 2069                  */
 2070                 return(retval);
 2071         }
 2072 
 2073         /*
 2074          * If we get to this point, the peripheral that was passed in
 2075          * doesn't match any of the patterns.
 2076          */
 2077         return(retval);
 2078 }
 2079 
 2080 static int
 2081 xptedtbusfunc(struct cam_eb *bus, void *arg)
 2082 {
 2083         struct ccb_dev_match *cdm;
 2084         dev_match_ret retval;
 2085 
 2086         cdm = (struct ccb_dev_match *)arg;
 2087 
 2088         /*
 2089          * If our position is for something deeper in the tree, that means
 2090          * that we've already seen this node.  So, we keep going down.
 2091          */
 2092         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2093          && (cdm->pos.cookie.bus == bus)
 2094          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2095          && (cdm->pos.cookie.target != NULL))
 2096                 retval = DM_RET_DESCEND;
 2097         else
 2098                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 2099 
 2100         /*
 2101          * If we got an error, bail out of the search.
 2102          */
 2103         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2104                 cdm->status = CAM_DEV_MATCH_ERROR;
 2105                 return(0);
 2106         }
 2107 
 2108         /*
 2109          * If the copy flag is set, copy this bus out.
 2110          */
 2111         if (retval & DM_RET_COPY) {
 2112                 int spaceleft, j;
 2113 
 2114                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2115                         sizeof(struct dev_match_result));
 2116 
 2117                 /*
 2118                  * If we don't have enough space to put in another
 2119                  * match result, save our position and tell the
 2120                  * user there are more devices to check.
 2121                  */
 2122                 if (spaceleft < sizeof(struct dev_match_result)) {
 2123                         bzero(&cdm->pos, sizeof(cdm->pos));
 2124                         cdm->pos.position_type = 
 2125                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 2126 
 2127                         cdm->pos.cookie.bus = bus;
 2128                         cdm->pos.generations[CAM_BUS_GENERATION]=
 2129                                 bus_generation;
 2130                         cdm->status = CAM_DEV_MATCH_MORE;
 2131                         return(0);
 2132                 }
 2133                 j = cdm->num_matches;
 2134                 cdm->num_matches++;
 2135                 cdm->matches[j].type = DEV_MATCH_BUS;
 2136                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 2137                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 2138                 cdm->matches[j].result.bus_result.unit_number =
 2139                         bus->sim->unit_number;
 2140                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 2141                         bus->sim->sim_name, DEV_IDLEN);
 2142         }
 2143 
 2144         /*
 2145          * If the user is only interested in busses, there's no
 2146          * reason to descend to the next level in the tree.
 2147          */
 2148         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2149                 return(1);
 2150 
 2151         /*
 2152          * If there is a target generation recorded, check it to
 2153          * make sure the target list hasn't changed.
 2154          */
 2155         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2156          && (bus == cdm->pos.cookie.bus)
 2157          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2158          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 2159          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 2160              bus->generation)) {
 2161                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2162                 return(0);
 2163         }
 2164 
 2165         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2166          && (cdm->pos.cookie.bus == bus)
 2167          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2168          && (cdm->pos.cookie.target != NULL))
 2169                 return(xpttargettraverse(bus,
 2170                                         (struct cam_et *)cdm->pos.cookie.target,
 2171                                          xptedttargetfunc, arg));
 2172         else
 2173                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 2174 }
 2175 
 2176 static int
 2177 xptedttargetfunc(struct cam_et *target, void *arg)
 2178 {
 2179         struct ccb_dev_match *cdm;
 2180 
 2181         cdm = (struct ccb_dev_match *)arg;
 2182 
 2183         /*
 2184          * If there is a device list generation recorded, check it to
 2185          * make sure the device list hasn't changed.
 2186          */
 2187         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2188          && (cdm->pos.cookie.bus == target->bus)
 2189          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2190          && (cdm->pos.cookie.target == target)
 2191          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2192          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 2193          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 2194              target->generation)) {
 2195                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2196                 return(0);
 2197         }
 2198 
 2199         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2200          && (cdm->pos.cookie.bus == target->bus)
 2201          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2202          && (cdm->pos.cookie.target == target)
 2203          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2204          && (cdm->pos.cookie.device != NULL))
 2205                 return(xptdevicetraverse(target,
 2206                                         (struct cam_ed *)cdm->pos.cookie.device,
 2207                                          xptedtdevicefunc, arg));
 2208         else
 2209                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 2210 }
 2211 
 2212 static int
 2213 xptedtdevicefunc(struct cam_ed *device, void *arg)
 2214 {
 2215 
 2216         struct ccb_dev_match *cdm;
 2217         dev_match_ret retval;
 2218 
 2219         cdm = (struct ccb_dev_match *)arg;
 2220 
 2221         /*
 2222          * If our position is for something deeper in the tree, that means
 2223          * that we've already seen this node.  So, we keep going down.
 2224          */
 2225         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2226          && (cdm->pos.cookie.device == device)
 2227          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2228          && (cdm->pos.cookie.periph != NULL))
 2229                 retval = DM_RET_DESCEND;
 2230         else
 2231                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 2232                                         device);
 2233 
 2234         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2235                 cdm->status = CAM_DEV_MATCH_ERROR;
 2236                 return(0);
 2237         }
 2238 
 2239         /*
 2240          * If the copy flag is set, copy this device out.
 2241          */
 2242         if (retval & DM_RET_COPY) {
 2243                 int spaceleft, j;
 2244 
 2245                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2246                         sizeof(struct dev_match_result));
 2247 
 2248                 /*
 2249                  * If we don't have enough space to put in another
 2250                  * match result, save our position and tell the
 2251                  * user there are more devices to check.
 2252                  */
 2253                 if (spaceleft < sizeof(struct dev_match_result)) {
 2254                         bzero(&cdm->pos, sizeof(cdm->pos));
 2255                         cdm->pos.position_type = 
 2256                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2257                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 2258 
 2259                         cdm->pos.cookie.bus = device->target->bus;
 2260                         cdm->pos.generations[CAM_BUS_GENERATION]=
 2261                                 bus_generation;
 2262                         cdm->pos.cookie.target = device->target;
 2263                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2264                                 device->target->bus->generation;
 2265                         cdm->pos.cookie.device = device;
 2266                         cdm->pos.generations[CAM_DEV_GENERATION] = 
 2267                                 device->target->generation;
 2268                         cdm->status = CAM_DEV_MATCH_MORE;
 2269                         return(0);
 2270                 }
 2271                 j = cdm->num_matches;
 2272                 cdm->num_matches++;
 2273                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 2274                 cdm->matches[j].result.device_result.path_id =
 2275                         device->target->bus->path_id;
 2276                 cdm->matches[j].result.device_result.target_id =
 2277                         device->target->target_id;
 2278                 cdm->matches[j].result.device_result.target_lun =
 2279                         device->lun_id;
 2280                 bcopy(&device->inq_data,
 2281                       &cdm->matches[j].result.device_result.inq_data,
 2282                       sizeof(struct scsi_inquiry_data));
 2283 
 2284                 /* Let the user know whether this device is unconfigured */
 2285                 if (device->flags & CAM_DEV_UNCONFIGURED)
 2286                         cdm->matches[j].result.device_result.flags =
 2287                                 DEV_RESULT_UNCONFIGURED;
 2288                 else
 2289                         cdm->matches[j].result.device_result.flags =
 2290                                 DEV_RESULT_NOFLAG;
 2291         }
 2292 
 2293         /*
 2294          * If the user isn't interested in peripherals, don't descend
 2295          * the tree any further.
 2296          */
 2297         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2298                 return(1);
 2299 
 2300         /*
 2301          * If there is a peripheral list generation recorded, make sure
 2302          * it hasn't changed.
 2303          */
 2304         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2305          && (device->target->bus == cdm->pos.cookie.bus)
 2306          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2307          && (device->target == cdm->pos.cookie.target)
 2308          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2309          && (device == cdm->pos.cookie.device)
 2310          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2311          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2312          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2313              device->generation)){
 2314                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2315                 return(0);
 2316         }
 2317 
 2318         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2319          && (cdm->pos.cookie.bus == device->target->bus)
 2320          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2321          && (cdm->pos.cookie.target == device->target)
 2322          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2323          && (cdm->pos.cookie.device == device)
 2324          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2325          && (cdm->pos.cookie.periph != NULL))
 2326                 return(xptperiphtraverse(device,
 2327                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2328                                 xptedtperiphfunc, arg));
 2329         else
 2330                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 2331 }
 2332 
 2333 static int
 2334 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 2335 {
 2336         struct ccb_dev_match *cdm;
 2337         dev_match_ret retval;
 2338 
 2339         cdm = (struct ccb_dev_match *)arg;
 2340 
 2341         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2342 
 2343         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2344                 cdm->status = CAM_DEV_MATCH_ERROR;
 2345                 return(0);
 2346         }
 2347 
 2348         /*
 2349          * If the copy flag is set, copy this peripheral out.
 2350          */
 2351         if (retval & DM_RET_COPY) {
 2352                 int spaceleft, j;
 2353 
 2354                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2355                         sizeof(struct dev_match_result));
 2356 
 2357                 /*
 2358                  * If we don't have enough space to put in another
 2359                  * match result, save our position and tell the
 2360                  * user there are more devices to check.
 2361                  */
 2362                 if (spaceleft < sizeof(struct dev_match_result)) {
 2363                         bzero(&cdm->pos, sizeof(cdm->pos));
 2364                         cdm->pos.position_type = 
 2365                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2366                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 2367                                 CAM_DEV_POS_PERIPH;
 2368 
 2369                         cdm->pos.cookie.bus = periph->path->bus;
 2370                         cdm->pos.generations[CAM_BUS_GENERATION]=
 2371                                 bus_generation;
 2372                         cdm->pos.cookie.target = periph->path->target;
 2373                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2374                                 periph->path->bus->generation;
 2375                         cdm->pos.cookie.device = periph->path->device;
 2376                         cdm->pos.generations[CAM_DEV_GENERATION] = 
 2377                                 periph->path->target->generation;
 2378                         cdm->pos.cookie.periph = periph;
 2379                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2380                                 periph->path->device->generation;
 2381                         cdm->status = CAM_DEV_MATCH_MORE;
 2382                         return(0);
 2383                 }
 2384 
 2385                 j = cdm->num_matches;
 2386                 cdm->num_matches++;
 2387                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2388                 cdm->matches[j].result.periph_result.path_id =
 2389                         periph->path->bus->path_id;
 2390                 cdm->matches[j].result.periph_result.target_id =
 2391                         periph->path->target->target_id;
 2392                 cdm->matches[j].result.periph_result.target_lun =
 2393                         periph->path->device->lun_id;
 2394                 cdm->matches[j].result.periph_result.unit_number =
 2395                         periph->unit_number;
 2396                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2397                         periph->periph_name, DEV_IDLEN);
 2398         }
 2399 
 2400         return(1);
 2401 }
 2402 
 2403 static int
 2404 xptedtmatch(struct ccb_dev_match *cdm)
 2405 {
 2406         int ret;
 2407 
 2408         cdm->num_matches = 0;
 2409 
 2410         /*
 2411          * Check the bus list generation.  If it has changed, the user
 2412          * needs to reset everything and start over.
 2413          */
 2414         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2415          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 2416          && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
 2417                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2418                 return(0);
 2419         }
 2420 
 2421         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2422          && (cdm->pos.cookie.bus != NULL))
 2423                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 2424                                      xptedtbusfunc, cdm);
 2425         else
 2426                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 2427 
 2428         /*
 2429          * If we get back 0, that means that we had to stop before fully
 2430          * traversing the EDT.  It also means that one of the subroutines
 2431          * has set the status field to the proper value.  If we get back 1,
 2432          * we've fully traversed the EDT and copied out any matching entries.
 2433          */
 2434         if (ret == 1)
 2435                 cdm->status = CAM_DEV_MATCH_LAST;
 2436 
 2437         return(ret);
 2438 }
 2439 
 2440 static int
 2441 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 2442 {
 2443         struct ccb_dev_match *cdm;
 2444 
 2445         cdm = (struct ccb_dev_match *)arg;
 2446 
 2447         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2448          && (cdm->pos.cookie.pdrv == pdrv)
 2449          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2450          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2451          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2452              (*pdrv)->generation)) {
 2453                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2454                 return(0);
 2455         }
 2456 
 2457         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2458          && (cdm->pos.cookie.pdrv == pdrv)
 2459          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2460          && (cdm->pos.cookie.periph != NULL))
 2461                 return(xptpdperiphtraverse(pdrv,
 2462                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2463                                 xptplistperiphfunc, arg));
 2464         else
 2465                 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
 2466 }
 2467 
 2468 static int
 2469 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 2470 {
 2471         struct ccb_dev_match *cdm;
 2472         dev_match_ret retval;
 2473 
 2474         cdm = (struct ccb_dev_match *)arg;
 2475 
 2476         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2477 
 2478         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2479                 cdm->status = CAM_DEV_MATCH_ERROR;
 2480                 return(0);
 2481         }
 2482 
 2483         /*
 2484          * If the copy flag is set, copy this peripheral out.
 2485          */
 2486         if (retval & DM_RET_COPY) {
 2487                 int spaceleft, j;
 2488 
 2489                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2490                         sizeof(struct dev_match_result));
 2491 
 2492                 /*
 2493                  * If we don't have enough space to put in another
 2494                  * match result, save our position and tell the
 2495                  * user there are more devices to check.
 2496                  */
 2497                 if (spaceleft < sizeof(struct dev_match_result)) {
 2498                         struct periph_driver **pdrv;
 2499 
 2500                         pdrv = NULL;
 2501                         bzero(&cdm->pos, sizeof(cdm->pos));
 2502                         cdm->pos.position_type = 
 2503                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 2504                                 CAM_DEV_POS_PERIPH;
 2505 
 2506                         /*
 2507                          * This may look a bit non-sensical, but it is
 2508                          * actually quite logical.  There are very few
 2509                          * peripheral drivers, and bloating every peripheral
 2510                          * structure with a pointer back to its parent
 2511                          * peripheral driver linker set entry would cost
 2512                          * more in the long run than doing this quick lookup.
 2513                          */
 2514                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 2515                                 if (strcmp((*pdrv)->driver_name,
 2516                                     periph->periph_name) == 0)
 2517                                         break;
 2518                         }
 2519 
 2520                         if (pdrv == NULL) {
 2521                                 cdm->status = CAM_DEV_MATCH_ERROR;
 2522                                 return(0);
 2523                         }
 2524 
 2525                         cdm->pos.cookie.pdrv = pdrv;
 2526                         /*
 2527                          * The periph generation slot does double duty, as
 2528                          * does the periph pointer slot.  They are used for
 2529                          * both edt and pdrv lookups and positioning.
 2530                          */
 2531                         cdm->pos.cookie.periph = periph;
 2532                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2533                                 (*pdrv)->generation;
 2534                         cdm->status = CAM_DEV_MATCH_MORE;
 2535                         return(0);
 2536                 }
 2537 
 2538                 j = cdm->num_matches;
 2539                 cdm->num_matches++;
 2540                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2541                 cdm->matches[j].result.periph_result.path_id =
 2542                         periph->path->bus->path_id;
 2543 
 2544                 /*
 2545                  * The transport layer peripheral doesn't have a target or
 2546                  * lun.
 2547                  */
 2548                 if (periph->path->target)
 2549                         cdm->matches[j].result.periph_result.target_id =
 2550                                 periph->path->target->target_id;
 2551                 else
 2552                         cdm->matches[j].result.periph_result.target_id = -1;
 2553 
 2554                 if (periph->path->device)
 2555                         cdm->matches[j].result.periph_result.target_lun =
 2556                                 periph->path->device->lun_id;
 2557                 else
 2558                         cdm->matches[j].result.periph_result.target_lun = -1;
 2559 
 2560                 cdm->matches[j].result.periph_result.unit_number =
 2561                         periph->unit_number;
 2562                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2563                         periph->periph_name, DEV_IDLEN);
 2564         }
 2565 
 2566         return(1);
 2567 }
 2568 
 2569 static int
 2570 xptperiphlistmatch(struct ccb_dev_match *cdm)
 2571 {
 2572         int ret;
 2573 
 2574         cdm->num_matches = 0;
 2575 
 2576         /*
 2577          * At this point in the edt traversal function, we check the bus
 2578          * list generation to make sure that no busses have been added or
 2579          * removed since the user last sent a XPT_DEV_MATCH ccb through.
 2580          * For the peripheral driver list traversal function, however, we
 2581          * don't have to worry about new peripheral driver types coming or
 2582          * going; they're in a linker set, and therefore can't change
 2583          * without a recompile.
 2584          */
 2585 
 2586         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2587          && (cdm->pos.cookie.pdrv != NULL))
 2588                 ret = xptpdrvtraverse(
 2589                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2590                                 xptplistpdrvfunc, cdm);
 2591         else
 2592                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2593 
 2594         /*
 2595          * If we get back 0, that means that we had to stop before fully
 2596          * traversing the peripheral driver tree.  It also means that one of
 2597          * the subroutines has set the status field to the proper value.  If
 2598          * we get back 1, we've fully traversed the EDT and copied out any
 2599          * matching entries.
 2600          */
 2601         if (ret == 1)
 2602                 cdm->status = CAM_DEV_MATCH_LAST;
 2603 
 2604         return(ret);
 2605 }
 2606 
 2607 static int
 2608 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2609 {
 2610         struct cam_eb *bus, *next_bus;
 2611         int retval;
 2612 
 2613         retval = 1;
 2614 
 2615         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
 2616              bus != NULL;
 2617              bus = next_bus) {
 2618                 next_bus = TAILQ_NEXT(bus, links);
 2619 
 2620                 retval = tr_func(bus, arg);
 2621                 if (retval == 0)
 2622                         return(retval);
 2623         }
 2624 
 2625         return(retval);
 2626 }
 2627 
 2628 static int
 2629 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2630                   xpt_targetfunc_t *tr_func, void *arg)
 2631 {
 2632         struct cam_et *target, *next_target;
 2633         int retval;
 2634 
 2635         retval = 1;
 2636         for (target = (start_target ? start_target :
 2637                        TAILQ_FIRST(&bus->et_entries));
 2638              target != NULL; target = next_target) {
 2639 
 2640                 next_target = TAILQ_NEXT(target, links);
 2641 
 2642                 retval = tr_func(target, arg);
 2643 
 2644                 if (retval == 0)
 2645                         return(retval);
 2646         }
 2647 
 2648         return(retval);
 2649 }
 2650 
 2651 static int
 2652 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2653                   xpt_devicefunc_t *tr_func, void *arg)
 2654 {
 2655         struct cam_ed *device, *next_device;
 2656         int retval;
 2657 
 2658         retval = 1;
 2659         for (device = (start_device ? start_device :
 2660                        TAILQ_FIRST(&target->ed_entries));
 2661              device != NULL;
 2662              device = next_device) {
 2663 
 2664                 next_device = TAILQ_NEXT(device, links);
 2665 
 2666                 retval = tr_func(device, arg);
 2667 
 2668                 if (retval == 0)
 2669                         return(retval);
 2670         }
 2671 
 2672         return(retval);
 2673 }
 2674 
 2675 static int
 2676 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2677                   xpt_periphfunc_t *tr_func, void *arg)
 2678 {
 2679         struct cam_periph *periph, *next_periph;
 2680         int retval;
 2681 
 2682         retval = 1;
 2683 
 2684         for (periph = (start_periph ? start_periph :
 2685                        SLIST_FIRST(&device->periphs));
 2686              periph != NULL;
 2687              periph = next_periph) {
 2688 
 2689                 next_periph = SLIST_NEXT(periph, periph_links);
 2690 
 2691                 retval = tr_func(periph, arg);
 2692                 if (retval == 0)
 2693                         return(retval);
 2694         }
 2695 
 2696         return(retval);
 2697 }
 2698 
 2699 static int
 2700 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2701                 xpt_pdrvfunc_t *tr_func, void *arg)
 2702 {
 2703         struct periph_driver **pdrv;
 2704         int retval;
 2705 
 2706         retval = 1;
 2707 
 2708         /*
 2709          * We don't traverse the peripheral driver list like we do the
 2710          * other lists, because it is a linker set, and therefore cannot be
 2711          * changed during runtime.  If the peripheral driver list is ever
 2712          * re-done to be something other than a linker set (i.e. it can
 2713          * change while the system is running), the list traversal should
 2714          * be modified to work like the other traversal functions.
 2715          */
 2716         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2717              *pdrv != NULL; pdrv++) {
 2718                 retval = tr_func(pdrv, arg);
 2719 
 2720                 if (retval == 0)
 2721                         return(retval);
 2722         }
 2723 
 2724         return(retval);
 2725 }
 2726 
 2727 static int
 2728 xptpdperiphtraverse(struct periph_driver **pdrv,
 2729                     struct cam_periph *start_periph,
 2730                     xpt_periphfunc_t *tr_func, void *arg)
 2731 {
 2732         struct cam_periph *periph, *next_periph;
 2733         int retval;
 2734 
 2735         retval = 1;
 2736 
 2737         for (periph = (start_periph ? start_periph :
 2738              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2739              periph = next_periph) {
 2740 
 2741                 next_periph = TAILQ_NEXT(periph, unit_links);
 2742 
 2743                 retval = tr_func(periph, arg);
 2744                 if (retval == 0)
 2745                         return(retval);
 2746         }
 2747         return(retval);
 2748 }
 2749 
 2750 static int
 2751 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2752 {
 2753         struct xpt_traverse_config *tr_config;
 2754 
 2755         tr_config = (struct xpt_traverse_config *)arg;
 2756 
 2757         if (tr_config->depth == XPT_DEPTH_BUS) {
 2758                 xpt_busfunc_t *tr_func;
 2759 
 2760                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2761 
 2762                 return(tr_func(bus, tr_config->tr_arg));
 2763         } else
 2764                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2765 }
 2766 
 2767 static int
 2768 xptdeftargetfunc(struct cam_et *target, void *arg)
 2769 {
 2770         struct xpt_traverse_config *tr_config;
 2771 
 2772         tr_config = (struct xpt_traverse_config *)arg;
 2773 
 2774         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2775                 xpt_targetfunc_t *tr_func;
 2776 
 2777                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2778 
 2779                 return(tr_func(target, tr_config->tr_arg));
 2780         } else
 2781                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2782 }
 2783 
 2784 static int
 2785 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2786 {
 2787         struct xpt_traverse_config *tr_config;
 2788 
 2789         tr_config = (struct xpt_traverse_config *)arg;
 2790 
 2791         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2792                 xpt_devicefunc_t *tr_func;
 2793 
 2794                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2795 
 2796                 return(tr_func(device, tr_config->tr_arg));
 2797         } else
 2798                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2799 }
 2800 
 2801 static int
 2802 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2803 {
 2804         struct xpt_traverse_config *tr_config;
 2805         xpt_periphfunc_t *tr_func;
 2806 
 2807         tr_config = (struct xpt_traverse_config *)arg;
 2808 
 2809         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2810 
 2811         /*
 2812          * Unlike the other default functions, we don't check for depth
 2813          * here.  The peripheral driver level is the last level in the EDT,
 2814          * so if we're here, we should execute the function in question.
 2815          */
 2816         return(tr_func(periph, tr_config->tr_arg));
 2817 }
 2818 
 2819 /*
 2820  * Execute the given function for every bus in the EDT.
 2821  */
 2822 static int
 2823 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2824 {
 2825         struct xpt_traverse_config tr_config;
 2826 
 2827         tr_config.depth = XPT_DEPTH_BUS;
 2828         tr_config.tr_func = tr_func;
 2829         tr_config.tr_arg = arg;
 2830 
 2831         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2832 }
 2833 
 2834 #ifdef notusedyet
 2835 /*
 2836  * Execute the given function for every target in the EDT.
 2837  */
 2838 static int
 2839 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
 2840 {
 2841         struct xpt_traverse_config tr_config;
 2842 
 2843         tr_config.depth = XPT_DEPTH_TARGET;
 2844         tr_config.tr_func = tr_func;
 2845         tr_config.tr_arg = arg;
 2846 
 2847         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2848 }
 2849 #endif /* notusedyet */
 2850 
 2851 /*
 2852  * Execute the given function for every device in the EDT.
 2853  */
 2854 static int
 2855 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2856 {
 2857         struct xpt_traverse_config tr_config;
 2858 
 2859         tr_config.depth = XPT_DEPTH_DEVICE;
 2860         tr_config.tr_func = tr_func;
 2861         tr_config.tr_arg = arg;
 2862 
 2863         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2864 }
 2865 
 2866 #ifdef notusedyet
 2867 /*
 2868  * Execute the given function for every peripheral in the EDT.
 2869  */
 2870 static int
 2871 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
 2872 {
 2873         struct xpt_traverse_config tr_config;
 2874 
 2875         tr_config.depth = XPT_DEPTH_PERIPH;
 2876         tr_config.tr_func = tr_func;
 2877         tr_config.tr_arg = arg;
 2878 
 2879         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2880 }
 2881 #endif /* notusedyet */
 2882 
 2883 static int
 2884 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2885 {
 2886         struct cam_path path;
 2887         struct ccb_getdev cgd;
 2888         struct async_node *cur_entry;
 2889 
 2890         cur_entry = (struct async_node *)arg;
 2891 
 2892         /*
 2893          * Don't report unconfigured devices (Wildcard devs,
 2894          * devices only for target mode, device instances
 2895          * that have been invalidated but are waiting for
 2896          * their last reference count to be released).
 2897          */
 2898         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2899                 return (1);
 2900 
 2901         xpt_compile_path(&path,
 2902                          NULL,
 2903                          device->target->bus->path_id,
 2904                          device->target->target_id,
 2905                          device->lun_id);
 2906         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
 2907         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2908         xpt_action((union ccb *)&cgd);
 2909         cur_entry->callback(cur_entry->callback_arg,
 2910                             AC_FOUND_DEVICE,
 2911                             &path, &cgd);
 2912         xpt_release_path(&path);
 2913 
 2914         return(1);
 2915 }
 2916 
 2917 static int
 2918 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2919 {
 2920         struct cam_path path;
 2921         struct ccb_pathinq cpi;
 2922         struct async_node *cur_entry;
 2923 
 2924         cur_entry = (struct async_node *)arg;
 2925 
 2926         xpt_compile_path(&path, /*periph*/NULL,
 2927                          bus->sim->path_id,
 2928                          CAM_TARGET_WILDCARD,
 2929                          CAM_LUN_WILDCARD);
 2930         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 2931         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2932         xpt_action((union ccb *)&cpi);
 2933         cur_entry->callback(cur_entry->callback_arg,
 2934                             AC_PATH_REGISTERED,
 2935                             &path, &cpi);
 2936         xpt_release_path(&path);
 2937 
 2938         return(1);
 2939 }
 2940 
 2941 void
 2942 xpt_action(union ccb *start_ccb)
 2943 {
 2944         int iopl;
 2945 
 2946         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2947 
 2948         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2949 
 2950         iopl = splsoftcam();
 2951         switch (start_ccb->ccb_h.func_code) {
 2952         case XPT_SCSI_IO:
 2953         {
 2954 #ifdef CAM_NEW_TRAN_CODE
 2955                 struct cam_ed *device;
 2956 #endif /* CAM_NEW_TRAN_CODE */
 2957 #ifdef CAMDEBUG
 2958                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 2959                 struct cam_path *path;
 2960 
 2961                 path = start_ccb->ccb_h.path;
 2962 #endif
 2963 
 2964                 /*
 2965                  * For the sake of compatibility with SCSI-1
 2966                  * devices that may not understand the identify
 2967                  * message, we include lun information in the
 2968                  * second byte of all commands.  SCSI-1 specifies
 2969                  * that luns are a 3 bit value and reserves only 3
 2970                  * bits for lun information in the CDB.  Later
 2971                  * revisions of the SCSI spec allow for more than 8
 2972                  * luns, but have deprecated lun information in the
 2973                  * CDB.  So, if the lun won't fit, we must omit.
 2974                  *
 2975                  * Also be aware that during initial probing for devices,
 2976                  * the inquiry information is unknown but initialized to 0.
 2977                  * This means that this code will be exercised while probing
 2978                  * devices with an ANSI revision greater than 2.
 2979                  */
 2980 #ifdef CAM_NEW_TRAN_CODE
 2981                 device = start_ccb->ccb_h.path->device;
 2982                 if (device->protocol_version <= SCSI_REV_2
 2983 #else /* CAM_NEW_TRAN_CODE */
 2984                 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
 2985 #endif /* CAM_NEW_TRAN_CODE */
 2986                  && start_ccb->ccb_h.target_lun < 8
 2987                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 2988 
 2989                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 2990                             start_ccb->ccb_h.target_lun << 5;
 2991                 }
 2992                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 2993                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
 2994                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
 2995                                        &path->device->inq_data),
 2996                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
 2997                                           cdb_str, sizeof(cdb_str))));
 2998                 /* FALLTHROUGH */
 2999         }
 3000         case XPT_TARGET_IO:
 3001         case XPT_CONT_TARGET_IO:
 3002                 start_ccb->csio.sense_resid = 0;
 3003                 start_ccb->csio.resid = 0;
 3004                 /* FALLTHROUGH */
 3005         case XPT_RESET_DEV:
 3006         case XPT_ENG_EXEC:
 3007         {
 3008                 struct cam_path *path;
 3009                 int s;
 3010                 int runq;
 3011 
 3012                 path = start_ccb->ccb_h.path;
 3013                 s = splsoftcam();
 3014 
 3015                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 3016                 if (path->device->qfrozen_cnt == 0)
 3017                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
 3018                 else
 3019                         runq = 0;
 3020                 splx(s);
 3021                 if (runq != 0)
 3022                         xpt_run_dev_sendq(path->bus);
 3023                 break;
 3024         }
 3025         case XPT_SET_TRAN_SETTINGS:
 3026         {
 3027                 xpt_set_transfer_settings(&start_ccb->cts,
 3028                                           start_ccb->ccb_h.path->device,
 3029                                           /*async_update*/FALSE);
 3030                 break;
 3031         }
 3032         case XPT_CALC_GEOMETRY:
 3033         {
 3034                 struct cam_sim *sim;
 3035 
 3036                 /* Filter out garbage */
 3037                 if (start_ccb->ccg.block_size == 0
 3038                  || start_ccb->ccg.volume_size == 0) {
 3039                         start_ccb->ccg.cylinders = 0;
 3040                         start_ccb->ccg.heads = 0;
 3041                         start_ccb->ccg.secs_per_track = 0;
 3042                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3043                         break;
 3044                 }
 3045 #ifdef PC98
 3046                 /*
 3047                  * In a PC-98 system, geometry translation depens on
 3048                  * the "real" device geometry obtained from mode page 4.
 3049                  * SCSI geometry translation is performed in the
 3050                  * initialization routine of the SCSI BIOS and the result
 3051                  * stored in host memory.  If the translation is available
 3052                  * in host memory, use it.  If not, rely on the default
 3053                  * translation the device driver performs.
 3054                  */
 3055                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 3056                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3057                         break;
 3058                 }
 3059 #endif
 3060                 sim = start_ccb->ccb_h.path->bus->sim;
 3061                 (*(sim->sim_action))(sim, start_ccb);
 3062                 break;
 3063         }
 3064         case XPT_ABORT:
 3065         {
 3066                 union ccb* abort_ccb;
 3067                 int s;                          
 3068 
 3069                 abort_ccb = start_ccb->cab.abort_ccb;
 3070                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 3071 
 3072                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 3073                                 struct cam_ccbq *ccbq;
 3074 
 3075                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
 3076                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
 3077                                 abort_ccb->ccb_h.status =
 3078                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 3079                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 3080                                 s = splcam();
 3081                                 xpt_done(abort_ccb);
 3082                                 splx(s);
 3083                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3084                                 break;
 3085                         }
 3086                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 3087                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 3088                                 /*
 3089                                  * We've caught this ccb en route to
 3090                                  * the SIM.  Flag it for abort and the
 3091                                  * SIM will do so just before starting
 3092                                  * real work on the CCB.
 3093                                  */
 3094                                 abort_ccb->ccb_h.status =
 3095                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 3096                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 3097                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3098                                 break;
 3099                         }
 3100                 } 
 3101                 if (XPT_FC_IS_QUEUED(abort_ccb)
 3102                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 3103                         /*
 3104                          * It's already completed but waiting
 3105                          * for our SWI to get to it.
 3106                          */
 3107                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 3108                         break;
 3109                 }
 3110                 /*
 3111                  * If we weren't able to take care of the abort request
 3112                  * in the XPT, pass the request down to the SIM for processing.
 3113                  */
 3114                 /* FALLTHROUGH */
 3115         }
 3116         case XPT_ACCEPT_TARGET_IO:
 3117         case XPT_EN_LUN:
 3118         case XPT_IMMED_NOTIFY:
 3119         case XPT_NOTIFY_ACK:
 3120         case XPT_GET_TRAN_SETTINGS:
 3121         case XPT_RESET_BUS:
 3122         {
 3123                 struct cam_sim *sim;
 3124 
 3125                 sim = start_ccb->ccb_h.path->bus->sim;
 3126                 (*(sim->sim_action))(sim, start_ccb);
 3127                 break;
 3128         }
 3129         case XPT_PATH_INQ:
 3130         {
 3131                 struct cam_sim *sim;
 3132 
 3133                 sim = start_ccb->ccb_h.path->bus->sim;
 3134                 (*(sim->sim_action))(sim, start_ccb);
 3135                 break;
 3136         }
 3137         case XPT_PATH_STATS:
 3138                 start_ccb->cpis.last_reset =
 3139                         start_ccb->ccb_h.path->bus->last_reset;
 3140                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3141                 break;
 3142         case XPT_GDEV_TYPE:
 3143         {
 3144                 struct cam_ed *dev;
 3145                 int s;
 3146 
 3147                 dev = start_ccb->ccb_h.path->device;
 3148                 s = splcam();
 3149                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3150                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3151                 } else {
 3152                         struct ccb_getdev *cgd;
 3153                         struct cam_eb *bus;
 3154                         struct cam_et *tar;
 3155 
 3156                         cgd = &start_ccb->cgd;
 3157                         bus = cgd->ccb_h.path->bus;
 3158                         tar = cgd->ccb_h.path->target;
 3159                         cgd->inq_data = dev->inq_data;
 3160                         cgd->ccb_h.status = CAM_REQ_CMP;
 3161                         cgd->serial_num_len = dev->serial_num_len;
 3162                         if ((dev->serial_num_len > 0)
 3163                          && (dev->serial_num != NULL))
 3164                                 bcopy(dev->serial_num, cgd->serial_num,
 3165                                       dev->serial_num_len);
 3166                 }
 3167                 splx(s);
 3168                 break; 
 3169         }
 3170         case XPT_GDEV_STATS:
 3171         {
 3172                 struct cam_ed *dev;
 3173                 int s;
 3174 
 3175                 dev = start_ccb->ccb_h.path->device;
 3176                 s = splcam();
 3177                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3178                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3179                 } else {
 3180                         struct ccb_getdevstats *cgds;
 3181                         struct cam_eb *bus;
 3182                         struct cam_et *tar;
 3183 
 3184                         cgds = &start_ccb->cgds;
 3185                         bus = cgds->ccb_h.path->bus;
 3186                         tar = cgds->ccb_h.path->target;
 3187                         cgds->dev_openings = dev->ccbq.dev_openings;
 3188                         cgds->dev_active = dev->ccbq.dev_active;
 3189                         cgds->devq_openings = dev->ccbq.devq_openings;
 3190                         cgds->devq_queued = dev->ccbq.queue.entries;
 3191                         cgds->held = dev->ccbq.held;
 3192                         cgds->last_reset = tar->last_reset;
 3193                         cgds->maxtags = dev->quirk->maxtags;
 3194                         cgds->mintags = dev->quirk->mintags;
 3195                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 3196                                 cgds->last_reset = bus->last_reset;
 3197                         cgds->ccb_h.status = CAM_REQ_CMP;
 3198                 }
 3199                 splx(s);
 3200                 break;
 3201         }
 3202         case XPT_GDEVLIST:
 3203         {
 3204                 struct cam_periph       *nperiph;
 3205                 struct periph_list      *periph_head;
 3206                 struct ccb_getdevlist   *cgdl;
 3207                 u_int                   i;
 3208                 int                     s;
 3209                 struct cam_ed           *device;
 3210                 int                     found;
 3211 
 3212 
 3213                 found = 0;
 3214 
 3215                 /*
 3216                  * Don't want anyone mucking with our data.
 3217                  */
 3218                 s = splcam();
 3219                 device = start_ccb->ccb_h.path->device;
 3220                 periph_head = &device->periphs;
 3221                 cgdl = &start_ccb->cgdl;
 3222 
 3223                 /*
 3224                  * Check and see if the list has changed since the user
 3225                  * last requested a list member.  If so, tell them that the
 3226                  * list has changed, and therefore they need to start over 
 3227                  * from the beginning.
 3228                  */
 3229                 if ((cgdl->index != 0) && 
 3230                     (cgdl->generation != device->generation)) {
 3231                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 3232                         splx(s);
 3233                         break;
 3234                 }
 3235 
 3236                 /*
 3237                  * Traverse the list of peripherals and attempt to find 
 3238                  * the requested peripheral.
 3239                  */
 3240                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 3241                      (nperiph != NULL) && (i <= cgdl->index);
 3242                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 3243                         if (i == cgdl->index) {
 3244                                 strncpy(cgdl->periph_name,
 3245                                         nperiph->periph_name,
 3246                                         DEV_IDLEN);
 3247                                 cgdl->unit_number = nperiph->unit_number;
 3248                                 found = 1;
 3249                         }
 3250                 }
 3251                 if (found == 0) {
 3252                         cgdl->status = CAM_GDEVLIST_ERROR;
 3253                         splx(s);
 3254                         break;
 3255                 }
 3256 
 3257                 if (nperiph == NULL)
 3258                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 3259                 else
 3260                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 3261 
 3262                 cgdl->index++;
 3263                 cgdl->generation = device->generation;
 3264 
 3265                 splx(s);
 3266                 cgdl->ccb_h.status = CAM_REQ_CMP;
 3267                 break;
 3268         }
 3269         case XPT_DEV_MATCH:
 3270         {
 3271                 int s;
 3272                 dev_pos_type position_type;
 3273                 struct ccb_dev_match *cdm;
 3274                 int ret;
 3275 
 3276                 cdm = &start_ccb->cdm;
 3277 
 3278                 /*
 3279                  * Prevent EDT changes while we traverse it.
 3280                  */
 3281                 s = splcam();
 3282                 /*
 3283                  * There are two ways of getting at information in the EDT.
 3284                  * The first way is via the primary EDT tree.  It starts
 3285                  * with a list of busses, then a list of targets on a bus,
 3286                  * then devices/luns on a target, and then peripherals on a
 3287                  * device/lun.  The "other" way is by the peripheral driver
 3288                  * lists.  The peripheral driver lists are organized by
 3289                  * peripheral driver.  (obviously)  So it makes sense to
 3290                  * use the peripheral driver list if the user is looking
 3291                  * for something like "da1", or all "da" devices.  If the
 3292                  * user is looking for something on a particular bus/target
 3293                  * or lun, it's generally better to go through the EDT tree.
 3294                  */
 3295 
 3296                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 3297                         position_type = cdm->pos.position_type;
 3298                 else {
 3299                         u_int i;
 3300 
 3301                         position_type = CAM_DEV_POS_NONE;
 3302 
 3303                         for (i = 0; i < cdm->num_patterns; i++) {
 3304                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 3305                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 3306                                         position_type = CAM_DEV_POS_EDT;
 3307                                         break;
 3308                                 }
 3309                         }
 3310 
 3311                         if (cdm->num_patterns == 0)
 3312                                 position_type = CAM_DEV_POS_EDT;
 3313                         else if (position_type == CAM_DEV_POS_NONE)
 3314                                 position_type = CAM_DEV_POS_PDRV;
 3315                 }
 3316 
 3317                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
 3318                 case CAM_DEV_POS_EDT:
 3319                         ret = xptedtmatch(cdm);
 3320                         break;
 3321                 case CAM_DEV_POS_PDRV:
 3322                         ret = xptperiphlistmatch(cdm);
 3323                         break;
 3324                 default:
 3325                         cdm->status = CAM_DEV_MATCH_ERROR;
 3326                         break;
 3327                 }
 3328 
 3329                 splx(s);
 3330 
 3331                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 3332                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 3333                 else
 3334                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3335 
 3336                 break;
 3337         }
 3338         case XPT_SASYNC_CB:
 3339         {
 3340                 struct ccb_setasync *csa;
 3341                 struct async_node *cur_entry;
 3342                 struct async_list *async_head;
 3343                 u_int32_t added;
 3344                 int s;
 3345 
 3346                 csa = &start_ccb->csa;
 3347                 added = csa->event_enable;
 3348                 async_head = &csa->ccb_h.path->device->asyncs;
 3349 
 3350                 /*
 3351                  * If there is already an entry for us, simply
 3352                  * update it.
 3353                  */
 3354                 s = splcam();
 3355                 cur_entry = SLIST_FIRST(async_head);
 3356                 while (cur_entry != NULL) {
 3357                         if ((cur_entry->callback_arg == csa->callback_arg)
 3358                          && (cur_entry->callback == csa->callback))
 3359                                 break;
 3360                         cur_entry = SLIST_NEXT(cur_entry, links);
 3361                 }
 3362 
 3363                 if (cur_entry != NULL) {
 3364                         /*
 3365                          * If the request has no flags set,
 3366                          * remove the entry.
 3367                          */
 3368                         added &= ~cur_entry->event_enable;
 3369                         if (csa->event_enable == 0) {
 3370                                 SLIST_REMOVE(async_head, cur_entry,
 3371                                              async_node, links);
 3372                                 csa->ccb_h.path->device->refcount--;
 3373                                 free(cur_entry, M_DEVBUF);
 3374                         } else {
 3375                                 cur_entry->event_enable = csa->event_enable;
 3376                         }
 3377                 } else {
 3378                         cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
 3379                                            M_NOWAIT);
 3380                         if (cur_entry == NULL) {
 3381                                 splx(s);
 3382                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 3383                                 break;
 3384                         }
 3385                         cur_entry->event_enable = csa->event_enable;
 3386                         cur_entry->callback_arg = csa->callback_arg;
 3387                         cur_entry->callback = csa->callback;
 3388                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 3389                         csa->ccb_h.path->device->refcount++;
 3390                 }
 3391 
 3392                 if ((added & AC_FOUND_DEVICE) != 0) {
 3393                         /*
 3394                          * Get this peripheral up to date with all
 3395                          * the currently existing devices.
 3396                          */
 3397                         xpt_for_all_devices(xptsetasyncfunc, cur_entry);
 3398                 }
 3399                 if ((added & AC_PATH_REGISTERED) != 0) {
 3400                         /*
 3401                          * Get this peripheral up to date with all
 3402                          * the currently existing busses.
 3403                          */
 3404                         xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
 3405                 }
 3406                 splx(s);
 3407                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3408                 break;
 3409         }
 3410         case XPT_REL_SIMQ:
 3411         {
 3412                 struct ccb_relsim *crs;
 3413                 struct cam_ed *dev;
 3414                 int s;
 3415 
 3416                 crs = &start_ccb->crs;
 3417                 dev = crs->ccb_h.path->device;
 3418                 if (dev == NULL) {
 3419 
 3420                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 3421                         break;
 3422                 }
 3423 
 3424                 s = splcam();
 3425 
 3426                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 3427 
 3428                         if ((dev->inq_data.flags & SID_CmdQue) != 0) {
 3429 
 3430                                 /* Don't ever go below one opening */
 3431                                 if (crs->openings > 0) {
 3432                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
 3433                                                             crs->openings);
 3434 
 3435                                         if (bootverbose) {
 3436                                                 xpt_print_path(crs->ccb_h.path);
 3437                                                 printf("tagged openings "
 3438                                                        "now %d\n",
 3439                                                        crs->openings);
 3440                                         }
 3441                                 }
 3442                         }
 3443                 }
 3444 
 3445                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 3446 
 3447                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 3448 
 3449                                 /*
 3450                                  * Just extend the old timeout and decrement
 3451                                  * the freeze count so that a single timeout
 3452                                  * is sufficient for releasing the queue.
 3453                                  */
 3454                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3455                                 untimeout(xpt_release_devq_timeout,
 3456                                           dev, dev->c_handle);
 3457                         } else {
 3458 
 3459                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3460                         }
 3461 
 3462                         dev->c_handle =
 3463                                 timeout(xpt_release_devq_timeout,
 3464                                         dev,
 3465                                         (crs->release_timeout * hz) / 1000);
 3466 
 3467                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 3468 
 3469                 }
 3470 
 3471                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 3472 
 3473                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 3474                                 /*
 3475                                  * Decrement the freeze count so that a single
 3476                                  * completion is still sufficient to unfreeze
 3477                                  * the queue.
 3478                                  */
 3479                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3480                         } else {
 3481                                 
 3482                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 3483                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3484                         }
 3485                 }
 3486 
 3487                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 3488 
 3489                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 3490                          || (dev->ccbq.dev_active == 0)) {
 3491 
 3492                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3493                         } else {
 3494                                 
 3495                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 3496                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3497                         }
 3498                 }
 3499                 splx(s);
 3500                 
 3501                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 3502 
 3503                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
 3504                                          /*run_queue*/TRUE);
 3505                 }
 3506                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
 3507                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3508                 break;
 3509         }
 3510         case XPT_SCAN_BUS:
 3511                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
 3512                 break;
 3513         case XPT_SCAN_LUN:
 3514                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
 3515                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
 3516                              start_ccb);
 3517                 break;
 3518         case XPT_DEBUG: {
 3519 #ifdef CAMDEBUG
 3520                 int s;
 3521                 
 3522                 s = splcam();
 3523 #ifdef CAM_DEBUG_DELAY
 3524                 cam_debug_delay = CAM_DEBUG_DELAY;
 3525 #endif
 3526                 cam_dflags = start_ccb->cdbg.flags;
 3527                 if (cam_dpath != NULL) {
 3528                         xpt_free_path(cam_dpath);
 3529                         cam_dpath = NULL;
 3530                 }
 3531 
 3532                 if (cam_dflags != CAM_DEBUG_NONE) {
 3533                         if (xpt_create_path(&cam_dpath, xpt_periph,
 3534                                             start_ccb->ccb_h.path_id,
 3535                                             start_ccb->ccb_h.target_id,
 3536                                             start_ccb->ccb_h.target_lun) !=
 3537                                             CAM_REQ_CMP) {
 3538                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3539                                 cam_dflags = CAM_DEBUG_NONE;
 3540                         } else {
 3541                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3542                                 xpt_print_path(cam_dpath);
 3543                                 printf("debugging flags now %x\n", cam_dflags);
 3544                         }
 3545                 } else {
 3546                         cam_dpath = NULL;
 3547                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3548                 }
 3549                 splx(s);
 3550 #else /* !CAMDEBUG */
 3551                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 3552 #endif /* CAMDEBUG */
 3553                 break;
 3554         }
 3555         case XPT_NOOP:
 3556                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 3557                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
 3558                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3559                 break;
 3560         default:
 3561         case XPT_SDEV_TYPE:
 3562         case XPT_TERM_IO:
 3563         case XPT_ENG_INQ:
 3564                 /* XXX Implement */
 3565                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 3566                 break;
 3567         }
 3568         splx(iopl);
 3569 }
 3570 
 3571 void
 3572 xpt_polled_action(union ccb *start_ccb)
 3573 {
 3574         int       s;
 3575         u_int32_t timeout;
 3576         struct    cam_sim *sim; 
 3577         struct    cam_devq *devq;
 3578         struct    cam_ed *dev;
 3579 
 3580         timeout = start_ccb->ccb_h.timeout;
 3581         sim = start_ccb->ccb_h.path->bus->sim;
 3582         devq = sim->devq;
 3583         dev = start_ccb->ccb_h.path->device;
 3584 
 3585         s = splcam();
 3586 
 3587         /*
 3588          * Steal an opening so that no other queued requests
 3589          * can get it before us while we simulate interrupts.
 3590          */
 3591         dev->ccbq.devq_openings--;
 3592         dev->ccbq.dev_openings--;       
 3593         
 3594         while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
 3595            && (--timeout > 0)) {
 3596                 DELAY(1000);
 3597                 (*(sim->sim_poll))(sim);
 3598                 camisr(&cam_netq);
 3599                 camisr(&cam_bioq);
 3600         }
 3601         
 3602         dev->ccbq.devq_openings++;
 3603         dev->ccbq.dev_openings++;
 3604         
 3605         if (timeout != 0) {
 3606                 xpt_action(start_ccb);
 3607                 while(--timeout > 0) {
 3608                         (*(sim->sim_poll))(sim);
 3609                         camisr(&cam_netq);
 3610                         camisr(&cam_bioq);
 3611                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 3612                             != CAM_REQ_INPROG)
 3613                                 break;
 3614                         DELAY(1000);
 3615                 }
 3616                 if (timeout == 0) {
 3617                         /*
 3618                          * XXX Is it worth adding a sim_timeout entry
 3619                          * point so we can attempt recovery?  If
 3620                          * this is only used for dumps, I don't think
 3621                          * it is.
 3622                          */
 3623                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3624                 }
 3625         } else {
 3626                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3627         }
 3628         splx(s);
 3629 }
 3630         
 3631 /*
 3632  * Schedule a peripheral driver to receive a ccb when it's
 3633  * target device has space for more transactions.
 3634  */
 3635 void
 3636 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 3637 {
 3638         struct cam_ed *device;
 3639         int s;
 3640         int runq;
 3641 
 3642         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3643         device = perph->path->device;
 3644         s = splsoftcam();
 3645         if (periph_is_queued(perph)) {
 3646                 /* Simply reorder based on new priority */
 3647                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3648                           ("   change priority to %d\n", new_priority));
 3649                 if (new_priority < perph->pinfo.priority) {
 3650                         camq_change_priority(&device->drvq,
 3651                                              perph->pinfo.index,
 3652                                              new_priority);
 3653                 }
 3654                 runq = 0;
 3655         } else {
 3656                 /* New entry on the queue */
 3657                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3658                           ("   added periph to queue\n"));
 3659                 perph->pinfo.priority = new_priority;
 3660                 perph->pinfo.generation = ++device->drvq.generation;
 3661                 camq_insert(&device->drvq, &perph->pinfo);
 3662                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3663         }
 3664         splx(s);
 3665         if (runq != 0) {
 3666                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3667                           ("   calling xpt_run_devq\n"));
 3668                 xpt_run_dev_allocq(perph->path->bus);
 3669         }
 3670 }
 3671 
 3672 
 3673 /*
 3674  * Schedule a device to run on a given queue.
 3675  * If the device was inserted as a new entry on the queue,
 3676  * return 1 meaning the device queue should be run. If we
 3677  * were already queued, implying someone else has already
 3678  * started the queue, return 0 so the caller doesn't attempt
 3679  * to run the queue.  Must be run at either splsoftcam
 3680  * (or splcam since that encompases splsoftcam).
 3681  */
 3682 static int
 3683 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3684                  u_int32_t new_priority)
 3685 {
 3686         int retval;
 3687         u_int32_t old_priority;
 3688 
 3689         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3690 
 3691         old_priority = pinfo->priority;
 3692 
 3693         /*
 3694          * Are we already queued?
 3695          */
 3696         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3697                 /* Simply reorder based on new priority */
 3698                 if (new_priority < old_priority) {
 3699                         camq_change_priority(queue, pinfo->index,
 3700                                              new_priority);
 3701                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3702                                         ("changed priority to %d\n",
 3703                                          new_priority));
 3704                 }
 3705                 retval = 0;
 3706         } else {
 3707                 /* New entry on the queue */
 3708                 if (new_priority < old_priority)
 3709                         pinfo->priority = new_priority;
 3710 
 3711                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3712                                 ("Inserting onto queue\n"));
 3713                 pinfo->generation = ++queue->generation;
 3714                 camq_insert(queue, pinfo);
 3715                 retval = 1;
 3716         }
 3717         return (retval);
 3718 }
 3719 
 3720 static void
 3721 xpt_run_dev_allocq(struct cam_eb *bus)
 3722 {
 3723         struct  cam_devq *devq;
 3724         int     s;
 3725 
 3726         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3727         devq = bus->sim->devq;
 3728 
 3729         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3730                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3731                          "openings == %d, active == %d\n",
 3732                          devq->alloc_queue.qfrozen_cnt,
 3733                          devq->alloc_queue.entries,
 3734                          devq->alloc_openings,
 3735                          devq->alloc_active));
 3736 
 3737         s = splsoftcam();
 3738         devq->alloc_queue.qfrozen_cnt++;
 3739         while ((devq->alloc_queue.entries > 0)
 3740             && (devq->alloc_openings > 0)
 3741             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
 3742                 struct  cam_ed_qinfo *qinfo;
 3743                 struct  cam_ed *device;
 3744                 union   ccb *work_ccb;
 3745                 struct  cam_periph *drv;
 3746                 struct  camq *drvq;
 3747                 
 3748                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3749                                                            CAMQ_HEAD);
 3750                 device = qinfo->device;
 3751 
 3752                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3753                                 ("running device %p\n", device));
 3754 
 3755                 drvq = &device->drvq;
 3756 
 3757 #ifdef CAMDEBUG
 3758                 if (drvq->entries <= 0) {
 3759                         panic("xpt_run_dev_allocq: "
 3760                               "Device on queue without any work to do");
 3761                 }
 3762 #endif
 3763                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3764                         devq->alloc_openings--;
 3765                         devq->alloc_active++;
 3766                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3767                         splx(s);
 3768                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3769                                       drv->pinfo.priority);
 3770                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3771                                         ("calling periph start\n"));
 3772                         drv->periph_start(drv, work_ccb);
 3773                 } else {
 3774                         /*
 3775                          * Malloc failure in alloc_ccb
 3776                          */
 3777                         /*
 3778                          * XXX add us to a list to be run from free_ccb
 3779                          * if we don't have any ccbs active on this
 3780                          * device queue otherwise we may never get run
 3781                          * again.
 3782                          */
 3783                         break;
 3784                 }
 3785         
 3786                 /* Raise IPL for possible insertion and test at top of loop */
 3787                 s = splsoftcam();
 3788 
 3789                 if (drvq->entries > 0) {
 3790                         /* We have more work.  Attempt to reschedule */
 3791                         xpt_schedule_dev_allocq(bus, device);
 3792                 }
 3793         }
 3794         devq->alloc_queue.qfrozen_cnt--;
 3795         splx(s);
 3796 }
 3797 
 3798 static void
 3799 xpt_run_dev_sendq(struct cam_eb *bus)
 3800 {
 3801         struct  cam_devq *devq;
 3802         int     s;
 3803 
 3804         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3805         
 3806         devq = bus->sim->devq;
 3807 
 3808         s = splcam();
 3809         devq->send_queue.qfrozen_cnt++;
 3810         splx(s);
 3811         s = splsoftcam();
 3812         while ((devq->send_queue.entries > 0)
 3813             && (devq->send_openings > 0)) {
 3814                 struct  cam_ed_qinfo *qinfo;
 3815                 struct  cam_ed *device;
 3816                 union ccb *work_ccb;
 3817                 struct  cam_sim *sim;
 3818                 int     ospl;
 3819 
 3820                 ospl = splcam();
 3821                 if (devq->send_queue.qfrozen_cnt > 1) {
 3822                         splx(ospl);
 3823                         break;
 3824                 }
 3825 
 3826                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3827                                                            CAMQ_HEAD);
 3828                 device = qinfo->device;
 3829 
 3830                 /*
 3831                  * If the device has been "frozen", don't attempt
 3832                  * to run it.
 3833                  */
 3834                 if (device->qfrozen_cnt > 0) {
 3835                         splx(ospl);
 3836                         continue;
 3837                 }
 3838 
 3839                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3840                                 ("running device %p\n", device));
 3841 
 3842                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3843                 if (work_ccb == NULL) {
 3844                         printf("device on run queue with no ccbs???\n");
 3845                         splx(ospl);
 3846                         continue;
 3847                 }
 3848 
 3849                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3850 
 3851                         if (num_highpower <= 0) {
 3852                                 /*
 3853                                  * We got a high power command, but we
 3854                                  * don't have any available slots.  Freeze
 3855                                  * the device queue until we have a slot
 3856                                  * available.
 3857                                  */
 3858                                 device->qfrozen_cnt++;
 3859                                 STAILQ_INSERT_TAIL(&highpowerq, 
 3860                                                    &work_ccb->ccb_h, 
 3861                                                    xpt_links.stqe);
 3862 
 3863                                 splx(ospl);
 3864                                 continue;
 3865                         } else {
 3866                                 /*
 3867                                  * Consume a high power slot while
 3868                                  * this ccb runs.
 3869                                  */
 3870                                 num_highpower--;
 3871                         }
 3872                 }
 3873                 devq->active_dev = device;
 3874                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3875 
 3876                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3877                 splx(ospl);
 3878 
 3879                 devq->send_openings--;
 3880                 devq->send_active++;            
 3881                 
 3882                 if (device->ccbq.queue.entries > 0)
 3883                         xpt_schedule_dev_sendq(bus, device);
 3884 
 3885                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
 3886                         /*
 3887                          * The client wants to freeze the queue
 3888                          * after this CCB is sent.
 3889                          */
 3890                         ospl = splcam();
 3891                         device->qfrozen_cnt++;
 3892                         splx(ospl);
 3893                 }
 3894                 
 3895                 splx(s);
 3896 
 3897                 /* In Target mode, the peripheral driver knows best... */
 3898                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3899                         if ((device->inq_flags & SID_CmdQue) != 0
 3900                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3901                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3902                         else
 3903                                 /*
 3904                                  * Clear this in case of a retried CCB that
 3905                                  * failed due to a rejected tag.
 3906                                  */
 3907                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3908                 }
 3909 
 3910                 /*
 3911                  * Device queues can be shared among multiple sim instances
 3912                  * that reside on different busses.  Use the SIM in the queue
 3913                  * CCB's path, rather than the one in the bus that was passed
 3914                  * into this function.
 3915                  */
 3916                 sim = work_ccb->ccb_h.path->bus->sim;
 3917                 (*(sim->sim_action))(sim, work_ccb);
 3918 
 3919                 ospl = splcam();
 3920                 devq->active_dev = NULL;
 3921                 splx(ospl);
 3922                 /* Raise IPL for possible insertion and test at top of loop */
 3923                 s = splsoftcam();
 3924         }
 3925         splx(s);
 3926         s = splcam();
 3927         devq->send_queue.qfrozen_cnt--;
 3928         splx(s);
 3929 }
 3930 
 3931 /*
 3932  * This function merges stuff from the slave ccb into the master ccb, while
 3933  * keeping important fields in the master ccb constant.
 3934  */
 3935 void
 3936 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3937 {
 3938         /*
 3939          * Pull fields that are valid for peripheral drivers to set
 3940          * into the master CCB along with the CCB "payload".
 3941          */
 3942         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3943         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3944         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3945         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 3946         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3947               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3948 }
 3949 
 3950 void
 3951 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3952 {
 3953         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3954         ccb_h->pinfo.priority = priority;
 3955         ccb_h->path = path;
 3956         ccb_h->path_id = path->bus->path_id;
 3957         if (path->target)
 3958                 ccb_h->target_id = path->target->target_id;
 3959         else
 3960                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3961         if (path->device) {
 3962                 ccb_h->target_lun = path->device->lun_id;
 3963                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3964         } else {
 3965                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3966         }
 3967         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3968         ccb_h->flags = 0;
 3969 }
 3970 
 3971 /* Path manipulation functions */
 3972 cam_status
 3973 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3974                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3975 {
 3976         struct     cam_path *path;
 3977         cam_status status;
 3978 
 3979         path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
 3980 
 3981         if (path == NULL) {
 3982                 status = CAM_RESRC_UNAVAIL;
 3983                 return(status);
 3984         }
 3985         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3986         if (status != CAM_REQ_CMP) {
 3987                 free(path, M_DEVBUF);
 3988                 path = NULL;
 3989         }
 3990         *new_path_ptr = path;
 3991         return (status);
 3992 }
 3993 
 3994 static cam_status
 3995 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 3996                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3997 {
 3998         struct       cam_eb *bus;
 3999         struct       cam_et *target;
 4000         struct       cam_ed *device;
 4001         cam_status   status;
 4002         int          s;
 4003 
 4004         status = CAM_REQ_CMP;   /* Completed without error */
 4005         target = NULL;          /* Wildcarded */
 4006         device = NULL;          /* Wildcarded */
 4007 
 4008         /*
 4009          * We will potentially modify the EDT, so block interrupts
 4010          * that may attempt to create cam paths.
 4011          */
 4012         s = splcam();
 4013         bus = xpt_find_bus(path_id);
 4014         if (bus == NULL) {
 4015                 status = CAM_PATH_INVALID;
 4016         } else {
 4017                 target = xpt_find_target(bus, target_id);
 4018                 if (target == NULL) {
 4019                         /* Create one */
 4020                         struct cam_et *new_target;
 4021 
 4022                         new_target = xpt_alloc_target(bus, target_id);
 4023                         if (new_target == NULL) {
 4024                                 status = CAM_RESRC_UNAVAIL;
 4025                         } else {
 4026                                 target = new_target;
 4027                         }
 4028                 }
 4029                 if (target != NULL) {
 4030                         device = xpt_find_device(target, lun_id);
 4031                         if (device == NULL) {
 4032                                 /* Create one */
 4033                                 struct cam_ed *new_device;
 4034 
 4035                                 new_device = xpt_alloc_device(bus,
 4036                                                               target,
 4037                                                               lun_id);
 4038                                 if (new_device == NULL) {
 4039                                         status = CAM_RESRC_UNAVAIL;
 4040                                 } else {
 4041                                         device = new_device;
 4042                                 }
 4043                         }
 4044                 }
 4045         }
 4046         splx(s);
 4047 
 4048         /*
 4049          * Only touch the user's data if we are successful.
 4050          */
 4051         if (status == CAM_REQ_CMP) {
 4052                 new_path->periph = perph;
 4053                 new_path->bus = bus;
 4054                 new_path->target = target;
 4055                 new_path->device = device;
 4056                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 4057         } else {
 4058                 if (device != NULL)
 4059                         xpt_release_device(bus, target, device);
 4060                 if (target != NULL)
 4061                         xpt_release_target(bus, target);
 4062                 if (bus != NULL)
 4063                         xpt_release_bus(bus);
 4064         }
 4065         return (status);
 4066 }
 4067 
 4068 static void
 4069 xpt_release_path(struct cam_path *path)
 4070 {
 4071         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 4072         if (path->device != NULL) {
 4073                 xpt_release_device(path->bus, path->target, path->device);
 4074                 path->device = NULL;
 4075         }
 4076         if (path->target != NULL) {
 4077                 xpt_release_target(path->bus, path->target);
 4078                 path->target = NULL;
 4079         }
 4080         if (path->bus != NULL) {
 4081                 xpt_release_bus(path->bus);
 4082                 path->bus = NULL;
 4083         }
 4084 }
 4085 
 4086 void
 4087 xpt_free_path(struct cam_path *path)
 4088 {
 4089         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 4090         xpt_release_path(path);
 4091         free(path, M_DEVBUF);
 4092 }
 4093 
 4094 
 4095 /*
 4096  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 4097  * in path1, 2 for match with wildcards in path2.
 4098  */
 4099 int
 4100 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 4101 {
 4102         int retval = 0;
 4103 
 4104         if (path1->bus != path2->bus) {
 4105                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 4106                         retval = 1;
 4107                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 4108                         retval = 2;
 4109                 else
 4110                         return (-1);
 4111         }
 4112         if (path1->target != path2->target) {
 4113                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 4114                         if (retval == 0)
 4115                                 retval = 1;
 4116                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 4117                         retval = 2;
 4118                 else
 4119                         return (-1);
 4120         }
 4121         if (path1->device != path2->device) {
 4122                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 4123                         if (retval == 0)
 4124                                 retval = 1;
 4125                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 4126                         retval = 2;
 4127                 else
 4128                         return (-1);
 4129         }
 4130         return (retval);
 4131 }
 4132 
 4133 void
 4134 xpt_print_path(struct cam_path *path)
 4135 {
 4136         if (path == NULL)
 4137                 printf("(nopath): ");
 4138         else {
 4139                 if (path->periph != NULL)
 4140                         printf("(%s%d:", path->periph->periph_name,
 4141                                path->periph->unit_number);
 4142                 else
 4143                         printf("(noperiph:");
 4144 
 4145                 if (path->bus != NULL)
 4146                         printf("%s%d:%d:", path->bus->sim->sim_name,
 4147                                path->bus->sim->unit_number,
 4148                                path->bus->sim->bus_id);
 4149                 else
 4150                         printf("nobus:");
 4151 
 4152                 if (path->target != NULL)
 4153                         printf("%d:", path->target->target_id);
 4154                 else
 4155                         printf("X:");
 4156 
 4157                 if (path->device != NULL)
 4158                         printf("%d): ", path->device->lun_id);
 4159                 else
 4160                         printf("X): ");
 4161         }
 4162 }
 4163 
 4164 int
 4165 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 4166 {
 4167         struct sbuf sb;
 4168 
 4169         sbuf_new(&sb, str, str_len, 0);
 4170 
 4171         if (path == NULL)
 4172                 sbuf_printf(&sb, "(nopath): ");
 4173         else {
 4174                 if (path->periph != NULL)
 4175                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 4176                                     path->periph->unit_number);
 4177                 else
 4178                         sbuf_printf(&sb, "(noperiph:");
 4179 
 4180                 if (path->bus != NULL)
 4181                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 4182                                     path->bus->sim->unit_number,
 4183                                     path->bus->sim->bus_id);
 4184                 else
 4185                         sbuf_printf(&sb, "nobus:");
 4186 
 4187                 if (path->target != NULL)
 4188                         sbuf_printf(&sb, "%d:", path->target->target_id);
 4189                 else
 4190                         sbuf_printf(&sb, "X:");
 4191 
 4192                 if (path->device != NULL)
 4193                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
 4194                 else
 4195                         sbuf_printf(&sb, "X): ");
 4196         }
 4197         sbuf_finish(&sb);
 4198 
 4199         return(sbuf_len(&sb));
 4200 }
 4201 
 4202 path_id_t
 4203 xpt_path_path_id(struct cam_path *path)
 4204 {
 4205         return(path->bus->path_id);
 4206 }
 4207 
 4208 target_id_t
 4209 xpt_path_target_id(struct cam_path *path)
 4210 {
 4211         if (path->target != NULL)
 4212                 return (path->target->target_id);
 4213         else
 4214                 return (CAM_TARGET_WILDCARD);
 4215 }
 4216 
 4217 lun_id_t
 4218 xpt_path_lun_id(struct cam_path *path)
 4219 {
 4220         if (path->device != NULL)
 4221                 return (path->device->lun_id);
 4222         else
 4223                 return (CAM_LUN_WILDCARD);
 4224 }
 4225 
 4226 struct cam_sim *
 4227 xpt_path_sim(struct cam_path *path)
 4228 {
 4229         return (path->bus->sim);
 4230 }
 4231 
 4232 struct cam_periph*
 4233 xpt_path_periph(struct cam_path *path)
 4234 {
 4235         return (path->periph);
 4236 }
 4237 
 4238 /*
 4239  * Release a CAM control block for the caller.  Remit the cost of the structure
 4240  * to the device referenced by the path.  If the this device had no 'credits'
 4241  * and peripheral drivers have registered async callbacks for this notification
 4242  * call them now.
 4243  */
 4244 void
 4245 xpt_release_ccb(union ccb *free_ccb)
 4246 {
 4247         int      s;
 4248         struct   cam_path *path;
 4249         struct   cam_ed *device;
 4250         struct   cam_eb *bus;
 4251 
 4252         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 4253         path = free_ccb->ccb_h.path;
 4254         device = path->device;
 4255         bus = path->bus;
 4256         s = splsoftcam();
 4257         cam_ccbq_release_opening(&device->ccbq);
 4258         if (xpt_ccb_count > xpt_max_ccbs) {
 4259                 xpt_free_ccb(free_ccb);
 4260                 xpt_ccb_count--;
 4261         } else {
 4262                 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
 4263         }
 4264         bus->sim->devq->alloc_openings++;
 4265         bus->sim->devq->alloc_active--;
 4266         /* XXX Turn this into an inline function - xpt_run_device?? */
 4267         if ((device_is_alloc_queued(device) == 0)
 4268          && (device->drvq.entries > 0)) {
 4269                 xpt_schedule_dev_allocq(bus, device);
 4270         }
 4271         splx(s);
 4272         if (dev_allocq_is_runnable(bus->sim->devq))
 4273                 xpt_run_dev_allocq(bus);
 4274 }
 4275 
 4276 /* Functions accessed by SIM drivers */
 4277 
 4278 /*
 4279  * A sim structure, listing the SIM entry points and instance
 4280  * identification info is passed to xpt_bus_register to hook the SIM
 4281  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 4282  * for this new bus and places it in the array of busses and assigns
 4283  * it a path_id.  The path_id may be influenced by "hard wiring"
 4284  * information specified by the user.  Once interrupt services are
 4285  * availible, the bus will be probed.
 4286  */
 4287 int32_t
 4288 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
 4289 {
 4290         struct cam_eb *new_bus;
 4291         struct cam_eb *old_bus;
 4292         struct ccb_pathinq cpi;
 4293         int s;
 4294 
 4295         sim->bus_id = bus;
 4296         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 4297                                           M_DEVBUF, M_NOWAIT);
 4298         if (new_bus == NULL) {
 4299                 /* Couldn't satisfy request */
 4300                 return (CAM_RESRC_UNAVAIL);
 4301         }
 4302 
 4303         if (strcmp(sim->sim_name, "xpt") != 0) {
 4304 
 4305                 sim->path_id =
 4306                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 4307         }
 4308 
 4309         TAILQ_INIT(&new_bus->et_entries);
 4310         new_bus->path_id = sim->path_id;
 4311         new_bus->sim = sim;
 4312         timevalclear(&new_bus->last_reset);
 4313         new_bus->flags = 0;
 4314         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 4315         new_bus->generation = 0;
 4316         s = splcam();
 4317         old_bus = TAILQ_FIRST(&xpt_busses);
 4318         while (old_bus != NULL
 4319             && old_bus->path_id < new_bus->path_id)
 4320                 old_bus = TAILQ_NEXT(old_bus, links);
 4321         if (old_bus != NULL)
 4322                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 4323         else
 4324                 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
 4325         bus_generation++;
 4326         splx(s);
 4327 
 4328         /* Notify interested parties */
 4329         if (sim->path_id != CAM_XPT_PATH_ID) {
 4330                 struct cam_path path;
 4331 
 4332                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
 4333                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4334                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 4335                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 4336                 xpt_action((union ccb *)&cpi);
 4337                 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
 4338                 xpt_release_path(&path);
 4339         }
 4340         return (CAM_SUCCESS);
 4341 }
 4342 
 4343 int32_t
 4344 xpt_bus_deregister(path_id_t pathid)
 4345 {
 4346         struct cam_path bus_path;
 4347         cam_status status;
 4348 
 4349         status = xpt_compile_path(&bus_path, NULL, pathid,
 4350                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4351         if (status != CAM_REQ_CMP)
 4352                 return (status);
 4353 
 4354         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 4355         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 4356         
 4357         /* Release the reference count held while registered. */
 4358         xpt_release_bus(bus_path.bus);
 4359         xpt_release_path(&bus_path);
 4360 
 4361         return (CAM_REQ_CMP);
 4362 }
 4363 
 4364 static path_id_t
 4365 xptnextfreepathid(void)
 4366 {
 4367         struct cam_eb *bus;
 4368         path_id_t pathid;
 4369         const char *strval;
 4370 
 4371         pathid = 0;
 4372         bus = TAILQ_FIRST(&xpt_busses);
 4373 retry:
 4374         /* Find an unoccupied pathid */
 4375         while (bus != NULL
 4376             && bus->path_id <= pathid) {
 4377                 if (bus->path_id == pathid)
 4378                         pathid++;
 4379                 bus = TAILQ_NEXT(bus, links);
 4380         }
 4381 
 4382         /*
 4383          * Ensure that this pathid is not reserved for
 4384          * a bus that may be registered in the future.
 4385          */
 4386         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 4387                 ++pathid;
 4388                 /* Start the search over */
 4389                 goto retry;
 4390         }
 4391         return (pathid);
 4392 }
 4393 
 4394 static path_id_t
 4395 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 4396 {
 4397         path_id_t pathid;
 4398         int i, dunit, val;
 4399         char buf[32];
 4400         const char *dname;
 4401 
 4402         pathid = CAM_XPT_PATH_ID;
 4403         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 4404         i = 0;
 4405         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 4406                 if (strcmp(dname, "scbus")) {
 4407                         /* Avoid a bit of foot shooting. */
 4408                         continue;
 4409                 }
 4410                 if (dunit < 0)          /* unwired?! */
 4411                         continue;
 4412                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 4413                         if (sim_bus == val) {
 4414                                 pathid = dunit;
 4415                                 break;
 4416                         }
 4417                 } else if (sim_bus == 0) {
 4418                         /* Unspecified matches bus 0 */
 4419                         pathid = dunit;
 4420                         break;
 4421                 } else {
 4422                         printf("Ambiguous scbus configuration for %s%d "
 4423                                "bus %d, cannot wire down.  The kernel "
 4424                                "config entry for scbus%d should "
 4425                                "specify a controller bus.\n"
 4426                                "Scbus will be assigned dynamically.\n",
 4427                                sim_name, sim_unit, sim_bus, dunit);
 4428                         break;
 4429                 }
 4430         }
 4431 
 4432         if (pathid == CAM_XPT_PATH_ID)
 4433                 pathid = xptnextfreepathid();
 4434         return (pathid);
 4435 }
 4436 
 4437 void
 4438 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 4439 {
 4440         struct cam_eb *bus;
 4441         struct cam_et *target, *next_target;
 4442         struct cam_ed *device, *next_device;
 4443         int s;
 4444 
 4445         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
 4446 
 4447         /*
 4448          * Most async events come from a CAM interrupt context.  In
 4449          * a few cases, the error recovery code at the peripheral layer,
 4450          * which may run from our SWI or a process context, may signal
 4451          * deferred events with a call to xpt_async. Ensure async
 4452          * notifications are serialized by blocking cam interrupts.
 4453          */
 4454         s = splcam();
 4455 
 4456         bus = path->bus;
 4457 
 4458         if (async_code == AC_BUS_RESET) { 
 4459                 int s;
 4460 
 4461                 s = splclock();
 4462                 /* Update our notion of when the last reset occurred */
 4463                 microtime(&bus->last_reset);
 4464                 splx(s);
 4465         }
 4466 
 4467         for (target = TAILQ_FIRST(&bus->et_entries);
 4468              target != NULL;
 4469              target = next_target) {
 4470 
 4471                 next_target = TAILQ_NEXT(target, links);
 4472 
 4473                 if (path->target != target
 4474                  && path->target->target_id != CAM_TARGET_WILDCARD
 4475                  && target->target_id != CAM_TARGET_WILDCARD)
 4476                         continue;
 4477 
 4478                 if (async_code == AC_SENT_BDR) {
 4479                         int s;
 4480 
 4481                         /* Update our notion of when the last reset occurred */
 4482                         s = splclock();
 4483                         microtime(&path->target->last_reset);
 4484                         splx(s);
 4485                 }
 4486 
 4487                 for (device = TAILQ_FIRST(&target->ed_entries);
 4488                      device != NULL;
 4489                      device = next_device) {
 4490 
 4491                         next_device = TAILQ_NEXT(device, links);
 4492 
 4493                         if (path->device != device 
 4494                          && path->device->lun_id != CAM_LUN_WILDCARD
 4495                          && device->lun_id != CAM_LUN_WILDCARD)
 4496                                 continue;
 4497 
 4498                         xpt_dev_async(async_code, bus, target,
 4499                                       device, async_arg);
 4500 
 4501                         xpt_async_bcast(&device->asyncs, async_code,
 4502                                         path, async_arg);
 4503                 }
 4504         }
 4505         
 4506         /*
 4507          * If this wasn't a fully wildcarded async, tell all
 4508          * clients that want all async events.
 4509          */
 4510         if (bus != xpt_periph->path->bus)
 4511                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 4512                                 path, async_arg);
 4513         splx(s);
 4514 }
 4515 
 4516 static void
 4517 xpt_async_bcast(struct async_list *async_head,
 4518                 u_int32_t async_code,
 4519                 struct cam_path *path, void *async_arg)
 4520 {
 4521         struct async_node *cur_entry;
 4522 
 4523         cur_entry = SLIST_FIRST(async_head);
 4524         while (cur_entry != NULL) {
 4525                 struct async_node *next_entry;
 4526                 /*
 4527                  * Grab the next list entry before we call the current
 4528                  * entry's callback.  This is because the callback function
 4529                  * can delete its async callback entry.
 4530                  */
 4531                 next_entry = SLIST_NEXT(cur_entry, links);
 4532                 if ((cur_entry->event_enable & async_code) != 0)
 4533                         cur_entry->callback(cur_entry->callback_arg,
 4534                                             async_code, path,
 4535                                             async_arg);
 4536                 cur_entry = next_entry;
 4537         }
 4538 }
 4539 
 4540 /*
 4541  * Handle any per-device event notifications that require action by the XPT.
 4542  */
 4543 static void
 4544 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
 4545               struct cam_ed *device, void *async_arg)
 4546 {
 4547         cam_status status;
 4548         struct cam_path newpath;
 4549 
 4550         /*
 4551          * We only need to handle events for real devices.
 4552          */
 4553         if (target->target_id == CAM_TARGET_WILDCARD
 4554          || device->lun_id == CAM_LUN_WILDCARD)
 4555                 return;
 4556 
 4557         /*
 4558          * We need our own path with wildcards expanded to
 4559          * handle certain types of events.
 4560          */
 4561         if ((async_code == AC_SENT_BDR)
 4562          || (async_code == AC_BUS_RESET)
 4563          || (async_code == AC_INQ_CHANGED))
 4564                 status = xpt_compile_path(&newpath, NULL,
 4565                                           bus->path_id,
 4566                                           target->target_id,
 4567                                           device->lun_id);
 4568         else
 4569                 status = CAM_REQ_CMP_ERR;
 4570 
 4571         if (status == CAM_REQ_CMP) {
 4572 
 4573                 /*
 4574                  * Allow transfer negotiation to occur in a
 4575                  * tag free environment.
 4576                  */
 4577                 if (async_code == AC_SENT_BDR
 4578                  || async_code == AC_BUS_RESET)
 4579                         xpt_toggle_tags(&newpath);
 4580 
 4581                 if (async_code == AC_INQ_CHANGED) {
 4582                         /*
 4583                          * We've sent a start unit command, or
 4584                          * something similar to a device that
 4585                          * may have caused its inquiry data to
 4586                          * change. So we re-scan the device to
 4587                          * refresh the inquiry data for it.
 4588                          */
 4589                         xpt_scan_lun(newpath.periph, &newpath,
 4590                                      CAM_EXPECT_INQ_CHANGE, NULL);
 4591                 }
 4592                 xpt_release_path(&newpath);
 4593         } else if (async_code == AC_LOST_DEVICE) {
 4594                 device->flags |= CAM_DEV_UNCONFIGURED;
 4595         } else if (async_code == AC_TRANSFER_NEG) {
 4596                 struct ccb_trans_settings *settings;
 4597 
 4598                 settings = (struct ccb_trans_settings *)async_arg;
 4599                 xpt_set_transfer_settings(settings, device,
 4600                                           /*async_update*/TRUE);
 4601         }
 4602 }
 4603 
 4604 u_int32_t
 4605 xpt_freeze_devq(struct cam_path *path, u_int count)
 4606 {
 4607         int s;
 4608         struct ccb_hdr *ccbh;
 4609 
 4610         s = splcam();
 4611         path->device->qfrozen_cnt += count;
 4612 
 4613         /*
 4614          * Mark the last CCB in the queue as needing
 4615          * to be requeued if the driver hasn't
 4616          * changed it's state yet.  This fixes a race
 4617          * where a ccb is just about to be queued to
 4618          * a controller driver when it's interrupt routine
 4619          * freezes the queue.  To completly close the
 4620          * hole, controller drives must check to see
 4621          * if a ccb's status is still CAM_REQ_INPROG
 4622          * under spl protection just before they queue
 4623          * the CCB.  See ahc_action/ahc_freeze_devq for
 4624          * an example.
 4625          */
 4626         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
 4627         if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4628                 ccbh->status = CAM_REQUEUE_REQ;
 4629         splx(s);
 4630         return (path->device->qfrozen_cnt);
 4631 }
 4632 
 4633 u_int32_t
 4634 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4635 {
 4636         sim->devq->send_queue.qfrozen_cnt += count;
 4637         if (sim->devq->active_dev != NULL) {
 4638                 struct ccb_hdr *ccbh;
 4639                 
 4640                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
 4641                                   ccb_hdr_tailq);
 4642                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4643                         ccbh->status = CAM_REQUEUE_REQ;
 4644         }
 4645         return (sim->devq->send_queue.qfrozen_cnt);
 4646 }
 4647 
 4648 static void
 4649 xpt_release_devq_timeout(void *arg)
 4650 {
 4651         struct cam_ed *device;
 4652 
 4653         device = (struct cam_ed *)arg;
 4654 
 4655         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
 4656 }
 4657 
 4658 void
 4659 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4660 {
 4661         xpt_release_devq_device(path->device, count, run_queue);
 4662 }
 4663 
 4664 static void
 4665 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
 4666 {
 4667         int     rundevq;
 4668         int     s0, s1;
 4669 
 4670         rundevq = 0;
 4671         s0 = splsoftcam();
 4672         s1 = splcam();
 4673         if (dev->qfrozen_cnt > 0) {
 4674 
 4675                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
 4676                 dev->qfrozen_cnt -= count;
 4677                 if (dev->qfrozen_cnt == 0) {
 4678 
 4679                         /*
 4680                          * No longer need to wait for a successful
 4681                          * command completion.
 4682                          */
 4683                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4684 
 4685                         /*
 4686                          * Remove any timeouts that might be scheduled
 4687                          * to release this queue.
 4688                          */
 4689                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4690                                 untimeout(xpt_release_devq_timeout, dev,
 4691                                           dev->c_handle);
 4692                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4693                         }
 4694 
 4695                         /*
 4696                          * Now that we are unfrozen schedule the
 4697                          * device so any pending transactions are
 4698                          * run.
 4699                          */
 4700                         if ((dev->ccbq.queue.entries > 0)
 4701                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4702                          && (run_queue != 0)) {
 4703                                 rundevq = 1;
 4704                         }
 4705                 }
 4706         }
 4707         splx(s1);
 4708         if (rundevq != 0)
 4709                 xpt_run_dev_sendq(dev->target->bus);
 4710         splx(s0);
 4711 }
 4712 
 4713 void
 4714 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4715 {
 4716         int     s;
 4717         struct  camq *sendq;
 4718 
 4719         sendq = &(sim->devq->send_queue);
 4720         s = splcam();
 4721         if (sendq->qfrozen_cnt > 0) {
 4722 
 4723                 sendq->qfrozen_cnt--;
 4724                 if (sendq->qfrozen_cnt == 0) {
 4725                         struct cam_eb *bus;
 4726 
 4727                         /*
 4728                          * If there is a timeout scheduled to release this
 4729                          * sim queue, remove it.  The queue frozen count is
 4730                          * already at 0.
 4731                          */
 4732                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4733                                 untimeout(xpt_release_simq_timeout, sim,
 4734                                           sim->c_handle);
 4735                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4736                         }
 4737                         bus = xpt_find_bus(sim->path_id);
 4738                         splx(s);
 4739 
 4740                         if (run_queue) {
 4741                                 /*
 4742                                  * Now that we are unfrozen run the send queue.
 4743                                  */
 4744                                 xpt_run_dev_sendq(bus);
 4745                         }
 4746                         xpt_release_bus(bus);
 4747                 } else
 4748                         splx(s);
 4749         } else
 4750                 splx(s);
 4751 }
 4752 
 4753 static void
 4754 xpt_release_simq_timeout(void *arg)
 4755 {
 4756         struct cam_sim *sim;
 4757 
 4758         sim = (struct cam_sim *)arg;
 4759         xpt_release_simq(sim, /* run_queue */ TRUE);
 4760 }
 4761 
 4762 void
 4763 xpt_done(union ccb *done_ccb)
 4764 {
 4765         int s;
 4766 
 4767         s = splcam();
 4768 
 4769         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4770         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4771                 /*
 4772                  * Queue up the request for handling by our SWI handler
 4773                  * any of the "non-immediate" type of ccbs.
 4774                  */
 4775                 switch (done_ccb->ccb_h.path->periph->type) {
 4776                 case CAM_PERIPH_BIO:
 4777                         TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
 4778                                           sim_links.tqe);
 4779                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4780                         swi_sched(cambio_ih, 0);
 4781                         break;
 4782                 case CAM_PERIPH_NET:
 4783                         TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
 4784                                           sim_links.tqe);
 4785                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4786                         swi_sched(camnet_ih, 0);
 4787                         break;
 4788                 }
 4789         }
 4790         splx(s);
 4791 }
 4792 
 4793 union ccb *
 4794 xpt_alloc_ccb()
 4795 {
 4796         union ccb *new_ccb;
 4797 
 4798         new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
 4799         return (new_ccb);
 4800 }
 4801 
 4802 void
 4803 xpt_free_ccb(union ccb *free_ccb)
 4804 {
 4805         free(free_ccb, M_DEVBUF);
 4806 }
 4807 
 4808 
 4809 
 4810 /* Private XPT functions */
 4811 
 4812 /*
 4813  * Get a CAM control block for the caller. Charge the structure to the device
 4814  * referenced by the path.  If the this device has no 'credits' then the
 4815  * device already has the maximum number of outstanding operations under way
 4816  * and we return NULL. If we don't have sufficient resources to allocate more
 4817  * ccbs, we also return NULL.
 4818  */
 4819 static union ccb *
 4820 xpt_get_ccb(struct cam_ed *device)
 4821 {
 4822         union ccb *new_ccb;
 4823         int s;
 4824 
 4825         s = splsoftcam();
 4826         if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
 4827                 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
 4828                 if (new_ccb == NULL) {
 4829                         splx(s);
 4830                         return (NULL);
 4831                 }
 4832                 callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 4833                 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
 4834                                   xpt_links.sle);
 4835                 xpt_ccb_count++;
 4836         }
 4837         cam_ccbq_take_opening(&device->ccbq);
 4838         SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
 4839         splx(s);
 4840         return (new_ccb);
 4841 }
 4842 
 4843 static void
 4844 xpt_release_bus(struct cam_eb *bus)
 4845 {
 4846         int s;
 4847 
 4848         s = splcam();
 4849         if ((--bus->refcount == 0)
 4850          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
 4851                 TAILQ_REMOVE(&xpt_busses, bus, links);
 4852                 bus_generation++;
 4853                 splx(s);
 4854                 free(bus, M_DEVBUF);
 4855         } else
 4856                 splx(s);
 4857 }
 4858 
 4859 static struct cam_et *
 4860 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4861 {
 4862         struct cam_et *target;
 4863 
 4864         target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
 4865         if (target != NULL) {
 4866                 struct cam_et *cur_target;
 4867 
 4868                 TAILQ_INIT(&target->ed_entries);
 4869                 target->bus = bus;
 4870                 target->target_id = target_id;
 4871                 target->refcount = 1;
 4872                 target->generation = 0;
 4873                 timevalclear(&target->last_reset);
 4874                 /*
 4875                  * Hold a reference to our parent bus so it
 4876                  * will not go away before we do.
 4877                  */
 4878                 bus->refcount++;
 4879 
 4880                 /* Insertion sort into our bus's target list */
 4881                 cur_target = TAILQ_FIRST(&bus->et_entries);
 4882                 while (cur_target != NULL && cur_target->target_id < target_id)
 4883                         cur_target = TAILQ_NEXT(cur_target, links);
 4884 
 4885                 if (cur_target != NULL) {
 4886                         TAILQ_INSERT_BEFORE(cur_target, target, links);
 4887                 } else {
 4888                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4889                 }
 4890                 bus->generation++;
 4891         }
 4892         return (target);
 4893 }
 4894 
 4895 static void
 4896 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
 4897 {
 4898         int s;
 4899 
 4900         s = splcam();
 4901         if ((--target->refcount == 0)
 4902          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
 4903                 TAILQ_REMOVE(&bus->et_entries, target, links);
 4904                 bus->generation++;
 4905                 splx(s);
 4906                 free(target, M_DEVBUF);
 4907                 xpt_release_bus(bus);
 4908         } else
 4909                 splx(s);
 4910 }
 4911 
 4912 static struct cam_ed *
 4913 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4914 {
 4915 #ifdef CAM_NEW_TRAN_CODE
 4916         struct     cam_path path;
 4917 #endif /* CAM_NEW_TRAN_CODE */
 4918         struct     cam_ed *device;
 4919         struct     cam_devq *devq;
 4920         cam_status status;
 4921 
 4922         /* Make space for us in the device queue on our bus */
 4923         devq = bus->sim->devq;
 4924         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 4925 
 4926         if (status != CAM_REQ_CMP) {
 4927                 device = NULL;
 4928         } else {
 4929                 device = (struct cam_ed *)malloc(sizeof(*device),
 4930                                                  M_DEVBUF, M_NOWAIT);
 4931         }
 4932 
 4933         if (device != NULL) {
 4934                 struct cam_ed *cur_device;
 4935 
 4936                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 4937                 device->alloc_ccb_entry.device = device;
 4938                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
 4939                 device->send_ccb_entry.device = device;
 4940                 device->target = target;
 4941                 device->lun_id = lun_id;
 4942                 /* Initialize our queues */
 4943                 if (camq_init(&device->drvq, 0) != 0) {
 4944                         free(device, M_DEVBUF);
 4945                         return (NULL);
 4946                 }
 4947                 if (cam_ccbq_init(&device->ccbq,
 4948                                   bus->sim->max_dev_openings) != 0) {
 4949                         camq_fini(&device->drvq);
 4950                         free(device, M_DEVBUF);
 4951                         return (NULL);
 4952                 }
 4953                 SLIST_INIT(&device->asyncs);
 4954                 SLIST_INIT(&device->periphs);
 4955                 device->generation = 0;
 4956                 device->owner = NULL;
 4957                 /*
 4958                  * Take the default quirk entry until we have inquiry
 4959                  * data and can determine a better quirk to use.
 4960                  */
 4961                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
 4962                 bzero(&device->inq_data, sizeof(device->inq_data));
 4963                 device->inq_flags = 0;
 4964                 device->queue_flags = 0;
 4965                 device->serial_num = NULL;
 4966                 device->serial_num_len = 0;
 4967                 device->qfrozen_cnt = 0;
 4968                 device->flags = CAM_DEV_UNCONFIGURED;
 4969                 device->tag_delay_count = 0;
 4970                 device->refcount = 1;
 4971                 callout_handle_init(&device->c_handle);
 4972 
 4973                 /*
 4974                  * Hold a reference to our parent target so it
 4975                  * will not go away before we do.
 4976                  */
 4977                 target->refcount++;
 4978 
 4979                 /*
 4980                  * XXX should be limited by number of CCBs this bus can
 4981                  * do.
 4982                  */
 4983                 xpt_max_ccbs += device->ccbq.devq_openings;
 4984                 /* Insertion sort into our target's device list */
 4985                 cur_device = TAILQ_FIRST(&target->ed_entries);
 4986                 while (cur_device != NULL && cur_device->lun_id < lun_id)
 4987                         cur_device = TAILQ_NEXT(cur_device, links);
 4988                 if (cur_device != NULL) {
 4989                         TAILQ_INSERT_BEFORE(cur_device, device, links);
 4990                 } else {
 4991                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 4992                 }
 4993                 target->generation++;
 4994 #ifdef CAM_NEW_TRAN_CODE
 4995                 if (lun_id != CAM_LUN_WILDCARD) {
 4996                         xpt_compile_path(&path,
 4997                                          NULL,
 4998                                          bus->path_id,
 4999                                          target->target_id,
 5000                                          lun_id);
 5001                         xpt_devise_transport(&path);
 5002                         xpt_release_path(&path);
 5003                 }
 5004 #endif /* CAM_NEW_TRAN_CODE */
 5005         }
 5006         return (device);
 5007 }
 5008 
 5009 static void
 5010 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
 5011                    struct cam_ed *device)
 5012 {
 5013         int s;
 5014 
 5015         s = splcam();
 5016         if ((--device->refcount == 0)
 5017          && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
 5018                 struct cam_devq *devq;
 5019 
 5020                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 5021                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 5022                         panic("Removing device while still queued for ccbs");
 5023 
 5024                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 5025                                 untimeout(xpt_release_devq_timeout, device,
 5026                                           device->c_handle);
 5027 
 5028                 TAILQ_REMOVE(&target->ed_entries, device,links);
 5029                 target->generation++;
 5030                 xpt_max_ccbs -= device->ccbq.devq_openings;
 5031                 /* Release our slot in the devq */
 5032                 devq = bus->sim->devq;
 5033                 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 5034                 splx(s);
 5035                 free(device, M_DEVBUF);
 5036                 xpt_release_target(bus, target);
 5037         } else
 5038                 splx(s);
 5039 }
 5040 
 5041 static u_int32_t
 5042 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 5043 {
 5044         int     s;
 5045         int     diff;
 5046         int     result;
 5047         struct  cam_ed *dev;
 5048 
 5049         dev = path->device;
 5050         s = splsoftcam();
 5051 
 5052         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 5053         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 5054         if (result == CAM_REQ_CMP && (diff < 0)) {
 5055                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 5056         }
 5057         /* Adjust the global limit */
 5058         xpt_max_ccbs += diff;
 5059         splx(s);
 5060         return (result);
 5061 }
 5062 
 5063 static struct cam_eb *
 5064 xpt_find_bus(path_id_t path_id)
 5065 {
 5066         struct cam_eb *bus;
 5067 
 5068         for (bus = TAILQ_FIRST(&xpt_busses);
 5069              bus != NULL;
 5070              bus = TAILQ_NEXT(bus, links)) {
 5071                 if (bus->path_id == path_id) {
 5072                         bus->refcount++;
 5073                         break;
 5074                 }
 5075         }
 5076         return (bus);
 5077 }
 5078 
 5079 static struct cam_et *
 5080 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 5081 {
 5082         struct cam_et *target;
 5083 
 5084         for (target = TAILQ_FIRST(&bus->et_entries);
 5085              target != NULL;
 5086              target = TAILQ_NEXT(target, links)) {
 5087                 if (target->target_id == target_id) {
 5088                         target->refcount++;
 5089                         break;
 5090                 }
 5091         }
 5092         return (target);
 5093 }
 5094 
 5095 static struct cam_ed *
 5096 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 5097 {
 5098         struct cam_ed *device;
 5099 
 5100         for (device = TAILQ_FIRST(&target->ed_entries);
 5101              device != NULL;
 5102              device = TAILQ_NEXT(device, links)) {
 5103                 if (device->lun_id == lun_id) {
 5104                         device->refcount++;
 5105                         break;
 5106                 }
 5107         }
 5108         return (device);
 5109 }
 5110 
 5111 typedef struct {
 5112         union   ccb *request_ccb;
 5113         struct  ccb_pathinq *cpi;
 5114         int     pending_count;
 5115 } xpt_scan_bus_info;
 5116 
 5117 /*
 5118  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
 5119  * As the scan progresses, xpt_scan_bus is used as the
 5120  * callback on completion function.
 5121  */
 5122 static void
 5123 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
 5124 {
 5125         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5126                   ("xpt_scan_bus\n"));
 5127         switch (request_ccb->ccb_h.func_code) {
 5128         case XPT_SCAN_BUS:
 5129         {
 5130                 xpt_scan_bus_info *scan_info;
 5131                 union   ccb *work_ccb;
 5132                 struct  cam_path *path;
 5133                 u_int   i;
 5134                 u_int   max_target;
 5135                 u_int   initiator_id;
 5136 
 5137                 /* Find out the characteristics of the bus */
 5138                 work_ccb = xpt_alloc_ccb();
 5139                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
 5140                               request_ccb->ccb_h.pinfo.priority);
 5141                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 5142                 xpt_action(work_ccb);
 5143                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
 5144                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
 5145                         xpt_free_ccb(work_ccb);
 5146                         xpt_done(request_ccb);
 5147                         return;
 5148                 }
 5149 
 5150                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5151                         /*
 5152                          * Can't scan the bus on an adapter that
 5153                          * cannot perform the initiator role.
 5154                          */
 5155                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5156                         xpt_free_ccb(work_ccb);
 5157                         xpt_done(request_ccb);
 5158                         return;
 5159                 }
 5160 
 5161                 /* Save some state for use while we probe for devices */
 5162                 scan_info = (xpt_scan_bus_info *)
 5163                     malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
 5164                 scan_info->request_ccb = request_ccb;
 5165                 scan_info->cpi = &work_ccb->cpi;
 5166 
 5167                 /* Cache on our stack so we can work asynchronously */
 5168                 max_target = scan_info->cpi->max_target;
 5169                 initiator_id = scan_info->cpi->initiator_id;
 5170 
 5171                 /*
 5172                  * Don't count the initiator if the
 5173                  * initiator is addressable.
 5174                  */
 5175                 scan_info->pending_count = max_target + 1;
 5176                 if (initiator_id <= max_target)
 5177                         scan_info->pending_count--;
 5178 
 5179                 for (i = 0; i <= max_target; i++) {
 5180                         cam_status status;
 5181                         if (i == initiator_id)
 5182                                 continue;
 5183 
 5184                         status = xpt_create_path(&path, xpt_periph,
 5185                                                  request_ccb->ccb_h.path_id,
 5186                                                  i, 0);
 5187                         if (status != CAM_REQ_CMP) {
 5188                                 printf("xpt_scan_bus: xpt_create_path failed"
 5189                                        " with status %#x, bus scan halted\n",
 5190                                        status);
 5191                                 break;
 5192                         }
 5193                         work_ccb = xpt_alloc_ccb();
 5194                         xpt_setup_ccb(&work_ccb->ccb_h, path,
 5195                                       request_ccb->ccb_h.pinfo.priority);
 5196                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5197                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5198                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5199                         work_ccb->crcn.flags = request_ccb->crcn.flags;
 5200                         xpt_action(work_ccb);
 5201                 }
 5202                 break;
 5203         }
 5204         case XPT_SCAN_LUN:
 5205         {
 5206                 xpt_scan_bus_info *scan_info;
 5207                 path_id_t path_id;
 5208                 target_id_t target_id;
 5209                 lun_id_t lun_id;
 5210 
 5211                 /* Reuse the same CCB to query if a device was really found */
 5212                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
 5213                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
 5214                               request_ccb->ccb_h.pinfo.priority);
 5215                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 5216 
 5217                 path_id = request_ccb->ccb_h.path_id;
 5218                 target_id = request_ccb->ccb_h.target_id;
 5219                 lun_id = request_ccb->ccb_h.target_lun;
 5220                 xpt_action(request_ccb);
 5221 
 5222                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
 5223                         struct cam_ed *device;
 5224                         struct cam_et *target;
 5225                         int s, phl;
 5226 
 5227                         /*
 5228                          * If we already probed lun 0 successfully, or
 5229                          * we have additional configured luns on this
 5230                          * target that might have "gone away", go onto
 5231                          * the next lun.
 5232                          */
 5233                         target = request_ccb->ccb_h.path->target;
 5234                         /*
 5235                          * We may touch devices that we don't
 5236                          * hold references too, so ensure they
 5237                          * don't disappear out from under us.
 5238                          * The target above is referenced by the
 5239                          * path in the request ccb.
 5240                          */
 5241                         phl = 0;
 5242                         s = splcam();
 5243                         device = TAILQ_FIRST(&target->ed_entries);
 5244                         if (device != NULL) {
 5245                                 phl = device->quirk->quirks & CAM_QUIRK_HILUNS;
 5246                                 if (device->lun_id == 0)
 5247                                         device = TAILQ_NEXT(device, links);
 5248                         }
 5249                         splx(s);
 5250                         if ((lun_id != 0) || (device != NULL)) {
 5251                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
 5252                                         lun_id++;
 5253                         }
 5254                 } else {
 5255                         struct cam_ed *device;
 5256                         
 5257                         device = request_ccb->ccb_h.path->device;
 5258 
 5259                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
 5260                                 /* Try the next lun */
 5261                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
 5262                                     (device->quirk->quirks & CAM_QUIRK_HILUNS))
 5263                                         lun_id++;
 5264                         }
 5265                 }
 5266 
 5267                 xpt_free_path(request_ccb->ccb_h.path);
 5268 
 5269                 /* Check Bounds */
 5270                 if ((lun_id == request_ccb->ccb_h.target_lun)
 5271                  || lun_id > scan_info->cpi->max_lun) {
 5272                         /* We're done */
 5273 
 5274                         xpt_free_ccb(request_ccb);
 5275                         scan_info->pending_count--;
 5276                         if (scan_info->pending_count == 0) {
 5277                                 xpt_free_ccb((union ccb *)scan_info->cpi);
 5278                                 request_ccb = scan_info->request_ccb;
 5279                                 free(scan_info, M_TEMP);
 5280                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
 5281                                 xpt_done(request_ccb);
 5282                         }
 5283                 } else {
 5284                         /* Try the next device */
 5285                         struct cam_path *path;
 5286                         cam_status status;
 5287 
 5288                         path = request_ccb->ccb_h.path;
 5289                         status = xpt_create_path(&path, xpt_periph,
 5290                                                  path_id, target_id, lun_id);
 5291                         if (status != CAM_REQ_CMP) {
 5292                                 printf("xpt_scan_bus: xpt_create_path failed "
 5293                                        "with status %#x, halting LUN scan\n",
 5294                                        status);
 5295                                 xpt_free_ccb(request_ccb);
 5296                                 scan_info->pending_count--;
 5297                                 if (scan_info->pending_count == 0) {
 5298                                         xpt_free_ccb(
 5299                                                 (union ccb *)scan_info->cpi);
 5300                                         request_ccb = scan_info->request_ccb;
 5301                                         free(scan_info, M_TEMP);
 5302                                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5303                                         xpt_done(request_ccb);
 5304                                         break;
 5305                                 }
 5306                         }
 5307                         xpt_setup_ccb(&request_ccb->ccb_h, path,
 5308                                       request_ccb->ccb_h.pinfo.priority);
 5309                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5310                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5311                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5312                         request_ccb->crcn.flags =
 5313                                 scan_info->request_ccb->crcn.flags;
 5314                         xpt_action(request_ccb);
 5315                 }
 5316                 break;
 5317         }
 5318         default:
 5319                 break;
 5320         }
 5321 }
 5322 
 5323 typedef enum {
 5324         PROBE_TUR,
 5325         PROBE_INQUIRY,
 5326         PROBE_FULL_INQUIRY,
 5327         PROBE_MODE_SENSE,
 5328         PROBE_SERIAL_NUM,
 5329         PROBE_TUR_FOR_NEGOTIATION
 5330 } probe_action;
 5331 
 5332 typedef enum {
 5333         PROBE_INQUIRY_CKSUM     = 0x01,
 5334         PROBE_SERIAL_CKSUM      = 0x02,
 5335         PROBE_NO_ANNOUNCE       = 0x04
 5336 } probe_flags;
 5337 
 5338 typedef struct {
 5339         TAILQ_HEAD(, ccb_hdr) request_ccbs;
 5340         probe_action    action;
 5341         union ccb       saved_ccb;
 5342         probe_flags     flags;
 5343         MD5_CTX         context;
 5344         u_int8_t        digest[16];
 5345 } probe_softc;
 5346 
 5347 static void
 5348 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
 5349              cam_flags flags, union ccb *request_ccb)
 5350 {
 5351         struct ccb_pathinq cpi;
 5352         cam_status status;
 5353         struct cam_path *new_path;
 5354         struct cam_periph *old_periph;
 5355         int s;
 5356         
 5357         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5358                   ("xpt_scan_lun\n"));
 5359         
 5360         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 5361         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5362         xpt_action((union ccb *)&cpi);
 5363 
 5364         if (cpi.ccb_h.status != CAM_REQ_CMP) {
 5365                 if (request_ccb != NULL) {
 5366                         request_ccb->ccb_h.status = cpi.ccb_h.status;
 5367                         xpt_done(request_ccb);
 5368                 }
 5369                 return;
 5370         }
 5371 
 5372         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5373                 /*
 5374                  * Can't scan the bus on an adapter that
 5375                  * cannot perform the initiator role.
 5376                  */
 5377                 if (request_ccb != NULL) {
 5378                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5379                         xpt_done(request_ccb);
 5380                 }
 5381                 return;
 5382         }
 5383 
 5384         if (request_ccb == NULL) {
 5385                 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
 5386                 if (request_ccb == NULL) {
 5387                         xpt_print_path(path);
 5388                         printf("xpt_scan_lun: can't allocate CCB, can't "
 5389                                "continue\n");
 5390                         return;
 5391                 }
 5392                 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
 5393                 if (new_path == NULL) {
 5394                         xpt_print_path(path);
 5395                         printf("xpt_scan_lun: can't allocate path, can't "
 5396                                "continue\n");
 5397                         free(request_ccb, M_TEMP);
 5398                         return;
 5399                 }
 5400                 status = xpt_compile_path(new_path, xpt_periph,
 5401                                           path->bus->path_id,
 5402                                           path->target->target_id,
 5403                                           path->device->lun_id);
 5404 
 5405                 if (status != CAM_REQ_CMP) {
 5406                         xpt_print_path(path);
 5407                         printf("xpt_scan_lun: can't compile path, can't "
 5408                                "continue\n");
 5409                         free(request_ccb, M_TEMP);
 5410                         free(new_path, M_TEMP);
 5411                         return;
 5412                 }
 5413                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
 5414                 request_ccb->ccb_h.cbfcnp = xptscandone;
 5415                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5416                 request_ccb->crcn.flags = flags;
 5417         }
 5418 
 5419         s = splsoftcam();
 5420         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
 5421                 probe_softc *softc;
 5422 
 5423                 softc = (probe_softc *)old_periph->softc;
 5424                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5425                                   periph_links.tqe);
 5426         } else {
 5427                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
 5428                                           probestart, "probe",
 5429                                           CAM_PERIPH_BIO,
 5430                                           request_ccb->ccb_h.path, NULL, 0,
 5431                                           request_ccb);
 5432 
 5433                 if (status != CAM_REQ_CMP) {
 5434                         xpt_print_path(path);
 5435                         printf("xpt_scan_lun: cam_alloc_periph returned an "
 5436                                "error, can't continue probe\n");
 5437                         request_ccb->ccb_h.status = status;
 5438                         xpt_done(request_ccb);
 5439                 }
 5440         }
 5441         splx(s);
 5442 }
 5443 
 5444 static void
 5445 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
 5446 {
 5447         xpt_release_path(done_ccb->ccb_h.path);
 5448         free(done_ccb->ccb_h.path, M_TEMP);
 5449         free(done_ccb, M_TEMP);
 5450 }
 5451 
 5452 static cam_status
 5453 proberegister(struct cam_periph *periph, void *arg)
 5454 {
 5455         union ccb *request_ccb; /* CCB representing the probe request */
 5456         probe_softc *softc;
 5457 
 5458         request_ccb = (union ccb *)arg;
 5459         if (periph == NULL) {
 5460                 printf("proberegister: periph was NULL!!\n");
 5461                 return(CAM_REQ_CMP_ERR);
 5462         }
 5463 
 5464         if (request_ccb == NULL) {
 5465                 printf("proberegister: no probe CCB, "
 5466                        "can't register device\n");
 5467                 return(CAM_REQ_CMP_ERR);
 5468         }
 5469 
 5470         softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
 5471 
 5472         if (softc == NULL) {
 5473                 printf("proberegister: Unable to probe new device. "
 5474                        "Unable to allocate softc\n");                           
 5475                 return(CAM_REQ_CMP_ERR);
 5476         }
 5477         TAILQ_INIT(&softc->request_ccbs);
 5478         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5479                           periph_links.tqe);
 5480         softc->flags = 0;
 5481         periph->softc = softc;
 5482         cam_periph_acquire(periph);
 5483         /*
 5484          * Ensure we've waited at least a bus settle
 5485          * delay before attempting to probe the device.
 5486          * For HBAs that don't do bus resets, this won't make a difference.
 5487          */
 5488         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
 5489                                       scsi_delay);
 5490         probeschedule(periph);
 5491         return(CAM_REQ_CMP);
 5492 }
 5493 
 5494 static void
 5495 probeschedule(struct cam_periph *periph)
 5496 {
 5497         struct ccb_pathinq cpi;
 5498         union ccb *ccb;
 5499         probe_softc *softc;
 5500 
 5501         softc = (probe_softc *)periph->softc;
 5502         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 5503 
 5504         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
 5505         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5506         xpt_action((union ccb *)&cpi);
 5507 
 5508         /*
 5509          * If a device has gone away and another device, or the same one,
 5510          * is back in the same place, it should have a unit attention
 5511          * condition pending.  It will not report the unit attention in
 5512          * response to an inquiry, which may leave invalid transfer
 5513          * negotiations in effect.  The TUR will reveal the unit attention
 5514          * condition.  Only send the TUR for lun 0, since some devices 
 5515          * will get confused by commands other than inquiry to non-existent
 5516          * luns.  If you think a device has gone away start your scan from
 5517          * lun 0.  This will insure that any bogus transfer settings are
 5518          * invalidated.
 5519          *
 5520          * If we haven't seen the device before and the controller supports
 5521          * some kind of transfer negotiation, negotiate with the first
 5522          * sent command if no bus reset was performed at startup.  This
 5523          * ensures that the device is not confused by transfer negotiation
 5524          * settings left over by loader or BIOS action.
 5525          */
 5526         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 5527          && (ccb->ccb_h.target_lun == 0)) {
 5528                 softc->action = PROBE_TUR;
 5529         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
 5530               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
 5531                 proberequestdefaultnegotiation(periph);
 5532                 softc->action = PROBE_INQUIRY;
 5533         } else {
 5534                 softc->action = PROBE_INQUIRY;
 5535         }
 5536 
 5537         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
 5538                 softc->flags |= PROBE_NO_ANNOUNCE;
 5539         else
 5540                 softc->flags &= ~PROBE_NO_ANNOUNCE;
 5541 
 5542         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
 5543 }
 5544 
 5545 static void
 5546 probestart(struct cam_periph *periph, union ccb *start_ccb)
 5547 {
 5548         /* Probe the device that our peripheral driver points to */
 5549         struct ccb_scsiio *csio;
 5550         probe_softc *softc;
 5551 
 5552         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
 5553 
 5554         softc = (probe_softc *)periph->softc;
 5555         csio = &start_ccb->csio;
 5556 
 5557         switch (softc->action) {
 5558         case PROBE_TUR:
 5559         case PROBE_TUR_FOR_NEGOTIATION:
 5560         {
 5561                 scsi_test_unit_ready(csio,
 5562                                      /*retries*/4,
 5563                                      probedone,
 5564                                      MSG_SIMPLE_Q_TAG,
 5565                                      SSD_FULL_SIZE,
 5566                                      /*timeout*/60000);
 5567                 break;
 5568         }
 5569         case PROBE_INQUIRY:
 5570         case PROBE_FULL_INQUIRY:
 5571         {
 5572                 u_int inquiry_len;
 5573                 struct scsi_inquiry_data *inq_buf;
 5574 
 5575                 inq_buf = &periph->path->device->inq_data;
 5576                 /*
 5577                  * If the device is currently configured, we calculate an
 5578                  * MD5 checksum of the inquiry data, and if the serial number
 5579                  * length is greater than 0, add the serial number data
 5580                  * into the checksum as well.  Once the inquiry and the
 5581                  * serial number check finish, we attempt to figure out
 5582                  * whether we still have the same device.
 5583                  */
 5584                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
 5585                         
 5586                         MD5Init(&softc->context);
 5587                         MD5Update(&softc->context, (unsigned char *)inq_buf,
 5588                                   sizeof(struct scsi_inquiry_data));
 5589                         softc->flags |= PROBE_INQUIRY_CKSUM;
 5590                         if (periph->path->device->serial_num_len > 0) {
 5591                                 MD5Update(&softc->context,
 5592                                           periph->path->device->serial_num,
 5593                                           periph->path->device->serial_num_len);
 5594                                 softc->flags |= PROBE_SERIAL_CKSUM;
 5595                         }
 5596                         MD5Final(softc->digest, &softc->context);
 5597                 } 
 5598 
 5599                 if (softc->action == PROBE_INQUIRY)
 5600                         inquiry_len = SHORT_INQUIRY_LENGTH;
 5601                 else
 5602                         inquiry_len = inq_buf->additional_length + 4;
 5603         
 5604                 scsi_inquiry(csio,
 5605                              /*retries*/4,
 5606                              probedone,
 5607                              MSG_SIMPLE_Q_TAG,
 5608                              (u_int8_t *)inq_buf,
 5609                              inquiry_len,
 5610                              /*evpd*/FALSE,
 5611                              /*page_code*/0,
 5612                              SSD_MIN_SIZE,
 5613                              /*timeout*/60 * 1000);
 5614                 break;
 5615         }
 5616         case PROBE_MODE_SENSE:
 5617         {
 5618                 void  *mode_buf;
 5619                 int    mode_buf_len;
 5620 
 5621                 mode_buf_len = sizeof(struct scsi_mode_header_6)
 5622                              + sizeof(struct scsi_mode_blk_desc)
 5623                              + sizeof(struct scsi_control_page);
 5624                 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
 5625                 if (mode_buf != NULL) {
 5626                         scsi_mode_sense(csio,
 5627                                         /*retries*/4,
 5628                                         probedone,
 5629                                         MSG_SIMPLE_Q_TAG,
 5630                                         /*dbd*/FALSE,
 5631                                         SMS_PAGE_CTRL_CURRENT,
 5632                                         SMS_CONTROL_MODE_PAGE,
 5633                                         mode_buf,
 5634                                         mode_buf_len,
 5635                                         SSD_FULL_SIZE,
 5636                                         /*timeout*/60000);
 5637                         break;
 5638                 }
 5639                 xpt_print_path(periph->path);
 5640                 printf("Unable to mode sense control page - malloc failure\n");
 5641                 softc->action = PROBE_SERIAL_NUM;
 5642                 /* FALLTHROUGH */
 5643         }
 5644         case PROBE_SERIAL_NUM:
 5645         {
 5646                 struct scsi_vpd_unit_serial_number *serial_buf;
 5647                 struct cam_ed* device;
 5648 
 5649                 serial_buf = NULL;
 5650                 device = periph->path->device;
 5651                 device->serial_num = NULL;
 5652                 device->serial_num_len = 0;
 5653 
 5654                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
 5655                         serial_buf = (struct scsi_vpd_unit_serial_number *)
 5656                                 malloc(sizeof(*serial_buf), M_TEMP,
 5657                                         M_NOWAIT | M_ZERO);
 5658 
 5659                 if (serial_buf != NULL) {
 5660                         scsi_inquiry(csio,
 5661                                      /*retries*/4,
 5662                                      probedone,
 5663                                      MSG_SIMPLE_Q_TAG,
 5664                                      (u_int8_t *)serial_buf,
 5665                                      sizeof(*serial_buf),
 5666                                      /*evpd*/TRUE,
 5667                                      SVPD_UNIT_SERIAL_NUMBER,
 5668                                      SSD_MIN_SIZE,
 5669                                      /*timeout*/60 * 1000);
 5670                         break;
 5671                 }
 5672                 /*
 5673                  * We'll have to do without, let our probedone
 5674                  * routine finish up for us.
 5675                  */
 5676                 start_ccb->csio.data_ptr = NULL;
 5677                 probedone(periph, start_ccb);
 5678                 return;
 5679         }
 5680         }
 5681         xpt_action(start_ccb);
 5682 }
 5683 
 5684 static void
 5685 proberequestdefaultnegotiation(struct cam_periph *periph)
 5686 {
 5687         struct ccb_trans_settings cts;
 5688 
 5689         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
 5690         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 5691 #ifdef CAM_NEW_TRAN_CODE
 5692         cts.type = CTS_TYPE_USER_SETTINGS;
 5693 #else /* CAM_NEW_TRAN_CODE */
 5694         cts.flags = CCB_TRANS_USER_SETTINGS;
 5695 #endif /* CAM_NEW_TRAN_CODE */
 5696         xpt_action((union ccb *)&cts);
 5697         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 5698 #ifdef CAM_NEW_TRAN_CODE
 5699         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 5700 #else /* CAM_NEW_TRAN_CODE */
 5701         cts.flags &= ~CCB_TRANS_USER_SETTINGS;
 5702         cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
 5703 #endif /* CAM_NEW_TRAN_CODE */
 5704         xpt_action((union ccb *)&cts);
 5705 }
 5706 
 5707 static void
 5708 probedone(struct cam_periph *periph, union ccb *done_ccb)
 5709 {
 5710         probe_softc *softc;
 5711         struct cam_path *path;
 5712         u_int32_t  priority;
 5713 
 5714         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
 5715 
 5716         softc = (probe_softc *)periph->softc;
 5717         path = done_ccb->ccb_h.path;
 5718         priority = done_ccb->ccb_h.pinfo.priority;
 5719 
 5720         switch (softc->action) {
 5721         case PROBE_TUR:
 5722         {
 5723                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 5724 
 5725                         if (cam_periph_error(done_ccb, 0,
 5726                                              SF_NO_PRINT, NULL) == ERESTART)
 5727                                 return;
 5728                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 5729                                 /* Don't wedge the queue */
 5730                                 xpt_release_devq(done_ccb->ccb_h.path,
 5731                                                  /*count*/1,
 5732                                                  /*run_queue*/TRUE);
 5733                 }
 5734                 softc->action = PROBE_INQUIRY;
 5735                 xpt_release_ccb(done_ccb);
 5736                 xpt_schedule(periph, priority);
 5737                 return;
 5738         }
 5739         case PROBE_INQUIRY:
 5740         case PROBE_FULL_INQUIRY:
 5741         {
 5742                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 5743                         struct scsi_inquiry_data *inq_buf;
 5744                         u_int8_t periph_qual;
 5745 
 5746                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
 5747                         inq_buf = &path->device->inq_data;
 5748 
 5749                         periph_qual = SID_QUAL(inq_buf);
 5750                         
 5751                         switch(periph_qual) {
 5752                         case SID_QUAL_LU_CONNECTED:
 5753                         {
 5754                                 u_int8_t alen;
 5755 
 5756                                 /*
 5757                                  * We conservatively request only
 5758                                  * SHORT_INQUIRY_LEN bytes of inquiry
 5759                                  * information during our first try
 5760                                  * at sending an INQUIRY. If the device
 5761                                  * has more information to give,
 5762                                  * perform a second request specifying
 5763                                  * the amount of information the device
 5764                                  * is willing to give.
 5765                                  */
 5766                                 alen = inq_buf->additional_length;
 5767                                 if (softc->action == PROBE_INQUIRY
 5768                                  && alen > (SHORT_INQUIRY_LENGTH - 4)) {
 5769                                         softc->action = PROBE_FULL_INQUIRY;
 5770                                         xpt_release_ccb(done_ccb);
 5771                                         xpt_schedule(periph, priority);
 5772                                         return;
 5773                                 }
 5774 
 5775                                 xpt_find_quirk(path->device);
 5776 
 5777 #ifdef CAM_NEW_TRAN_CODE
 5778                                 xpt_devise_transport(path);
 5779 #endif /* CAM_NEW_TRAN_CODE */
 5780                                 if ((inq_buf->flags & SID_CmdQue) != 0)
 5781                                         softc->action = PROBE_MODE_SENSE;
 5782                                 else
 5783                                         softc->action = PROBE_SERIAL_NUM;
 5784 
 5785                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
 5786 
 5787                                 xpt_release_ccb(done_ccb);
 5788                                 xpt_schedule(periph, priority);
 5789                                 return;
 5790                         }
 5791                         default:
 5792                                 break;
 5793                         }
 5794                 } else if (cam_periph_error(done_ccb, 0,
 5795                                             done_ccb->ccb_h.target_lun > 0
 5796                                             ? SF_RETRY_UA|SF_QUIET_IR
 5797                                             : SF_RETRY_UA,
 5798                                             &softc->saved_ccb) == ERESTART) {
 5799                         return;
 5800                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 5801                         /* Don't wedge the queue */
 5802                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 5803                                          /*run_queue*/TRUE);
 5804                 }
 5805                 /*
 5806                  * If we get to this point, we got an error status back
 5807                  * from the inquiry and the error status doesn't require
 5808                  * automatically retrying the command.  Therefore, the
 5809                  * inquiry failed.  If we had inquiry information before
 5810                  * for this device, but this latest inquiry command failed,
 5811                  * the device has probably gone away.  If this device isn't
 5812                  * already marked unconfigured, notify the peripheral
 5813                  * drivers that this device is no more.
 5814                  */
 5815                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 5816                         /* Send the async notification. */
 5817                         xpt_async(AC_LOST_DEVICE, path, NULL);
 5818 
 5819                 xpt_release_ccb(done_ccb);
 5820                 break;
 5821         }
 5822         case PROBE_MODE_SENSE:
 5823         {
 5824                 struct ccb_scsiio *csio;
 5825                 struct scsi_mode_header_6 *mode_hdr;
 5826 
 5827                 csio = &done_ccb->csio;
 5828                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
 5829                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 5830                         struct scsi_control_page *page;
 5831                         u_int8_t *offset;
 5832 
 5833                         offset = ((u_int8_t *)&mode_hdr[1])
 5834                             + mode_hdr->blk_desc_len;
 5835                         page = (struct scsi_control_page *)offset;
 5836                         path->device->queue_flags = page->queue_flags;
 5837                 } else if (cam_periph_error(done_ccb, 0,
 5838                                             SF_RETRY_UA|SF_NO_PRINT,
 5839                                             &softc->saved_ccb) == ERESTART) {
 5840                         return;
 5841                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 5842                         /* Don't wedge the queue */
 5843                         xpt_release_devq(done_ccb->ccb_h.path,
 5844                                          /*count*/1, /*run_queue*/TRUE);
 5845                 }
 5846                 xpt_release_ccb(done_ccb);
 5847                 free(mode_hdr, M_TEMP);
 5848                 softc->action = PROBE_SERIAL_NUM;
 5849                 xpt_schedule(periph, priority);
 5850                 return;
 5851         }
 5852         case PROBE_SERIAL_NUM:
 5853         {
 5854                 struct ccb_scsiio *csio;
 5855                 struct scsi_vpd_unit_serial_number *serial_buf;
 5856                 u_int32_t  priority;
 5857                 int changed;
 5858                 int have_serialnum;
 5859 
 5860                 changed = 1;
 5861                 have_serialnum = 0;
 5862                 csio = &done_ccb->csio;
 5863                 priority = done_ccb->ccb_h.pinfo.priority;
 5864                 serial_buf =
 5865                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
 5866 
 5867                 /* Clean up from previous instance of this device */
 5868                 if (path->device->serial_num != NULL) {
 5869                         free(path->device->serial_num, M_DEVBUF);
 5870                         path->device->serial_num = NULL;
 5871                         path->device->serial_num_len = 0;
 5872                 }
 5873 
 5874                 if (serial_buf == NULL) {
 5875                         /*
 5876                          * Don't process the command as it was never sent
 5877                          */
 5878                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
 5879                         && (serial_buf->length > 0)) {
 5880 
 5881                         have_serialnum = 1;
 5882                         path->device->serial_num =
 5883                                 (u_int8_t *)malloc((serial_buf->length + 1),
 5884                                                    M_DEVBUF, M_NOWAIT);
 5885                         if (path->device->serial_num != NULL) {
 5886                                 bcopy(serial_buf->serial_num,
 5887                                       path->device->serial_num,
 5888                                       serial_buf->length);
 5889                                 path->device->serial_num_len =
 5890                                     serial_buf->length;
 5891                                 path->device->serial_num[serial_buf->length]
 5892                                     = '\0';
 5893                         }
 5894                 } else if (cam_periph_error(done_ccb, 0,
 5895                                             SF_RETRY_UA|SF_NO_PRINT,
 5896                                             &softc->saved_ccb) == ERESTART) {
 5897                         return;
 5898                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 5899                         /* Don't wedge the queue */
 5900                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 5901                                          /*run_queue*/TRUE);
 5902                 }
 5903                 
 5904                 /*
 5905                  * Let's see if we have seen this device before.
 5906                  */
 5907                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
 5908                         MD5_CTX context;
 5909                         u_int8_t digest[16];
 5910 
 5911                         MD5Init(&context);
 5912                         
 5913                         MD5Update(&context,
 5914                                   (unsigned char *)&path->device->inq_data,
 5915                                   sizeof(struct scsi_inquiry_data));
 5916 
 5917                         if (have_serialnum)
 5918                                 MD5Update(&context, serial_buf->serial_num,
 5919                                           serial_buf->length);
 5920 
 5921                         MD5Final(digest, &context);
 5922                         if (bcmp(softc->digest, digest, 16) == 0)
 5923                                 changed = 0;
 5924 
 5925                         /*
 5926                          * XXX Do we need to do a TUR in order to ensure
 5927                          *     that the device really hasn't changed???
 5928                          */
 5929                         if ((changed != 0)
 5930                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
 5931                                 xpt_async(AC_LOST_DEVICE, path, NULL);
 5932                 }
 5933                 if (serial_buf != NULL)
 5934                         free(serial_buf, M_TEMP);
 5935 
 5936                 if (changed != 0) {
 5937                         /*
 5938                          * Now that we have all the necessary
 5939                          * information to safely perform transfer
 5940                          * negotiations... Controllers don't perform
 5941                          * any negotiation or tagged queuing until
 5942                          * after the first XPT_SET_TRAN_SETTINGS ccb is
 5943                          * received.  So, on a new device, just retreive
 5944                          * the user settings, and set them as the current
 5945                          * settings to set the device up.
 5946                          */
 5947                         proberequestdefaultnegotiation(periph);
 5948                         xpt_release_ccb(done_ccb);
 5949 
 5950                         /*
 5951                          * Perform a TUR to allow the controller to
 5952                          * perform any necessary transfer negotiation.
 5953                          */
 5954                         softc->action = PROBE_TUR_FOR_NEGOTIATION;
 5955                         xpt_schedule(periph, priority);
 5956                         return;
 5957                 }
 5958                 xpt_release_ccb(done_ccb);
 5959                 break;
 5960         }
 5961         case PROBE_TUR_FOR_NEGOTIATION:
 5962                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 5963                         /* Don't wedge the queue */
 5964                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 5965                                          /*run_queue*/TRUE);
 5966                 }
 5967 
 5968                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
 5969 
 5970                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
 5971                         /* Inform the XPT that a new device has been found */
 5972                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 5973                         xpt_action(done_ccb);
 5974 
 5975                         xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
 5976                 }
 5977                 xpt_release_ccb(done_ccb);
 5978                 break;
 5979         }
 5980         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 5981         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
 5982         done_ccb->ccb_h.status = CAM_REQ_CMP;
 5983         xpt_done(done_ccb);
 5984         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
 5985                 cam_periph_invalidate(periph);
 5986                 cam_periph_release(periph);
 5987         } else {
 5988                 probeschedule(periph);
 5989         }
 5990 }
 5991 
 5992 static void
 5993 probecleanup(struct cam_periph *periph)
 5994 {
 5995         free(periph->softc, M_TEMP);
 5996 }
 5997 
 5998 static void
 5999 xpt_find_quirk(struct cam_ed *device)
 6000 {
 6001         caddr_t match;
 6002 
 6003         match = cam_quirkmatch((caddr_t)&device->inq_data,
 6004                                (caddr_t)xpt_quirk_table,
 6005                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
 6006                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
 6007 
 6008         if (match == NULL)
 6009                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
 6010 
 6011         device->quirk = (struct xpt_quirk_entry *)match;
 6012 }
 6013 
 6014 #ifdef CAM_NEW_TRAN_CODE
 6015 
 6016 static void
 6017 xpt_devise_transport(struct cam_path *path)
 6018 {
 6019         struct ccb_pathinq cpi;
 6020         struct ccb_trans_settings cts;
 6021         struct scsi_inquiry_data *inq_buf;
 6022 
 6023         /* Get transport information from the SIM */
 6024         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 6025         cpi.ccb_h.func_code = XPT_PATH_INQ;
 6026         xpt_action((union ccb *)&cpi);
 6027 
 6028         inq_buf = NULL;
 6029         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
 6030                 inq_buf = &path->device->inq_data;
 6031         path->device->protocol = PROTO_SCSI;
 6032         path->device->protocol_version =
 6033             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
 6034         path->device->transport = cpi.transport;
 6035         path->device->transport_version = cpi.transport_version;
 6036 
 6037         /*
 6038          * Any device not using SPI3 features should
 6039          * be considered SPI2 or lower.
 6040          */
 6041         if (inq_buf != NULL) {
 6042                 if (path->device->transport == XPORT_SPI
 6043                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
 6044                  && path->device->transport_version > 2)
 6045                         path->device->transport_version = 2;
 6046         } else {
 6047                 struct cam_ed* otherdev;
 6048 
 6049                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
 6050                      otherdev != NULL;
 6051                      otherdev = TAILQ_NEXT(otherdev, links)) {
 6052                         if (otherdev != path->device)
 6053                                 break;
 6054                 }
 6055                     
 6056                 if (otherdev != NULL) {
 6057                         /*
 6058                          * Initially assume the same versioning as
 6059                          * prior luns for this target.
 6060                          */
 6061                         path->device->protocol_version =
 6062                             otherdev->protocol_version;
 6063                         path->device->transport_version =
 6064                             otherdev->transport_version;
 6065                 } else {
 6066                         /* Until we know better, opt for safty */
 6067                         path->device->protocol_version = 2;
 6068                         if (path->device->transport == XPORT_SPI)
 6069                                 path->device->transport_version = 2;
 6070                         else
 6071                                 path->device->transport_version = 0;
 6072                 }
 6073         }
 6074 
 6075         /*
 6076          * XXX
 6077          * For a device compliant with SPC-2 we should be able
 6078          * to determine the transport version supported by
 6079          * scrutinizing the version descriptors in the
 6080          * inquiry buffer.
 6081          */
 6082 
 6083         /* Tell the controller what we think */
 6084         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 6085         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 6086         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 6087         cts.transport = path->device->transport;
 6088         cts.transport_version = path->device->transport_version;
 6089         cts.protocol = path->device->protocol;
 6090         cts.protocol_version = path->device->protocol_version;
 6091         cts.proto_specific.valid = 0;
 6092         cts.xport_specific.valid = 0;
 6093         xpt_action((union ccb *)&cts);
 6094 }
 6095 
 6096 static void
 6097 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
 6098                           int async_update)
 6099 {
 6100         struct  ccb_pathinq cpi;
 6101         struct  ccb_trans_settings cur_cts;
 6102         struct  ccb_trans_settings_scsi *scsi;
 6103         struct  ccb_trans_settings_scsi *cur_scsi;
 6104         struct  cam_sim *sim;
 6105         struct  scsi_inquiry_data *inq_data;
 6106 
 6107         if (device == NULL) {
 6108                 cts->ccb_h.status = CAM_PATH_INVALID;
 6109                 xpt_done((union ccb *)cts);
 6110                 return;
 6111         }
 6112 
 6113         if (cts->protocol == PROTO_UNKNOWN
 6114          || cts->protocol == PROTO_UNSPECIFIED) {
 6115                 cts->protocol = device->protocol;
 6116                 cts->protocol_version = device->protocol_version;
 6117         }
 6118 
 6119         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
 6120          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
 6121                 cts->protocol_version = device->protocol_version;
 6122 
 6123         if (cts->protocol != device->protocol) {
 6124                 xpt_print_path(cts->ccb_h.path);
 6125                 printf("Uninitialized Protocol %x:%x?\n",
 6126                        cts->protocol, device->protocol);
 6127                 cts->protocol = device->protocol;
 6128         }
 6129 
 6130         if (cts->protocol_version > device->protocol_version) {
 6131                 if (bootverbose) {
 6132                         xpt_print_path(cts->ccb_h.path);
 6133                         printf("Down reving Protocol Version from %d to %d?\n",
 6134                                cts->protocol_version, device->protocol_version);
 6135                 }
 6136                 cts->protocol_version = device->protocol_version;
 6137         }
 6138 
 6139         if (cts->transport == XPORT_UNKNOWN
 6140          || cts->transport == XPORT_UNSPECIFIED) {
 6141                 cts->transport = device->transport;
 6142                 cts->transport_version = device->transport_version;
 6143         }
 6144 
 6145         if (cts->transport_version == XPORT_VERSION_UNKNOWN
 6146          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
 6147                 cts->transport_version = device->transport_version;
 6148 
 6149         if (cts->transport != device->transport) {
 6150                 xpt_print_path(cts->ccb_h.path);
 6151                 printf("Uninitialized Transport %x:%x?\n",
 6152                        cts->transport, device->transport);
 6153                 cts->transport = device->transport;
 6154         }
 6155 
 6156         if (cts->transport_version > device->transport_version) {
 6157                 if (bootverbose) {
 6158                         xpt_print_path(cts->ccb_h.path);
 6159                         printf("Down reving Transport Version from %d to %d?\n",
 6160                                cts->transport_version,
 6161                                device->transport_version);
 6162                 }
 6163                 cts->transport_version = device->transport_version;
 6164         }
 6165 
 6166         sim = cts->ccb_h.path->bus->sim;
 6167 
 6168         /*
 6169          * Nothing more of interest to do unless
 6170          * this is a device connected via the
 6171          * SCSI protocol.
 6172          */
 6173         if (cts->protocol != PROTO_SCSI) {
 6174                 if (async_update == FALSE) 
 6175                         (*(sim->sim_action))(sim, (union ccb *)cts);
 6176                 return;
 6177         }
 6178 
 6179         inq_data = &device->inq_data;
 6180         scsi = &cts->proto_specific.scsi;
 6181         xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
 6182         cpi.ccb_h.func_code = XPT_PATH_INQ;
 6183         xpt_action((union ccb *)&cpi);
 6184 
 6185         /* SCSI specific sanity checking */
 6186         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
 6187          || (inq_data->flags & SID_CmdQue) == 0
 6188          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
 6189          || (device->quirk->mintags == 0)) {
 6190                 /*
 6191                  * Can't tag on hardware that doesn't support tags,
 6192                  * doesn't have it enabled, or has broken tag support.
 6193                  */
 6194                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6195         }
 6196 
 6197         if (async_update == FALSE) {
 6198                 /*
 6199                  * Perform sanity checking against what the
 6200                  * controller and device can do.
 6201                  */
 6202                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
 6203                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 6204                 cur_cts.type = cts->type;
 6205                 xpt_action((union ccb *)&cur_cts);
 6206 
 6207                 cur_scsi = &cur_cts.proto_specific.scsi;
 6208                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
 6209                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6210                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
 6211                 }
 6212                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
 6213                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6214         }
 6215 
 6216         /* SPI specific sanity checking */
 6217         if (cts->transport == XPORT_SPI && async_update == FALSE) {
 6218                 u_int spi3caps;
 6219                 struct ccb_trans_settings_spi *spi;
 6220                 struct ccb_trans_settings_spi *cur_spi;
 6221 
 6222                 spi = &cts->xport_specific.spi;
 6223 
 6224                 cur_spi = &cur_cts.xport_specific.spi;
 6225 
 6226                 /* Fill in any gaps in what the user gave us */
 6227                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
 6228                         spi->sync_period = cur_spi->sync_period;
 6229                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
 6230                         spi->sync_period = 0;
 6231                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
 6232                         spi->sync_offset = cur_spi->sync_offset;
 6233                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
 6234                         spi->sync_offset = 0;
 6235                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
 6236                         spi->ppr_options = cur_spi->ppr_options;
 6237                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
 6238                         spi->ppr_options = 0;
 6239                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
 6240                         spi->bus_width = cur_spi->bus_width;
 6241                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
 6242                         spi->bus_width = 0;
 6243                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
 6244                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
 6245                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
 6246                 }
 6247                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
 6248                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
 6249                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6250                   && (inq_data->flags & SID_Sync) == 0
 6251                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
 6252                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
 6253                  || (cur_spi->sync_offset == 0)
 6254                  || (cur_spi->sync_period == 0)) {
 6255                         /* Force async */
 6256                         spi->sync_period = 0;
 6257                         spi->sync_offset = 0;
 6258                 }
 6259 
 6260                 switch (spi->bus_width) {
 6261                 case MSG_EXT_WDTR_BUS_32_BIT:
 6262                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6263                           || (inq_data->flags & SID_WBus32) != 0
 6264                           || cts->type == CTS_TYPE_USER_SETTINGS)
 6265                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
 6266                                 break;
 6267                         /* Fall Through to 16-bit */
 6268                 case MSG_EXT_WDTR_BUS_16_BIT:
 6269                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6270                           || (inq_data->flags & SID_WBus16) != 0
 6271                           || cts->type == CTS_TYPE_USER_SETTINGS)
 6272                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
 6273                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
 6274                                 break;
 6275                         }
 6276                         /* Fall Through to 8-bit */
 6277                 default: /* New bus width?? */
 6278                 case MSG_EXT_WDTR_BUS_8_BIT:
 6279                         /* All targets can do this */
 6280                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
 6281                         break;
 6282                 }
 6283 
 6284                 spi3caps = cpi.xport_specific.spi.ppr_options;
 6285                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6286                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
 6287                         spi3caps &= inq_data->spi3data;
 6288 
 6289                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
 6290                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
 6291 
 6292                 if ((spi3caps & SID_SPI_IUS) == 0)
 6293                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
 6294 
 6295                 if ((spi3caps & SID_SPI_QAS) == 0)
 6296                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
 6297 
 6298                 /* No SPI Transfer settings are allowed unless we are wide */
 6299                 if (spi->bus_width == 0)
 6300                         spi->ppr_options = 0;
 6301 
 6302                 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
 6303                         /*
 6304                          * Can't tag queue without disconnection.
 6305                          */
 6306                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6307                         scsi->valid |= CTS_SCSI_VALID_TQ;
 6308                 }
 6309 
 6310                 /*
 6311                  * If we are currently performing tagged transactions to
 6312                  * this device and want to change its negotiation parameters,
 6313                  * go non-tagged for a bit to give the controller a chance to
 6314                  * negotiate unhampered by tag messages.
 6315                  */
 6316                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
 6317                  && (device->inq_flags & SID_CmdQue) != 0
 6318                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
 6319                  && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
 6320                                    CTS_SPI_VALID_SYNC_OFFSET|
 6321                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
 6322                         xpt_toggle_tags(cts->ccb_h.path);
 6323         }
 6324 
 6325         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
 6326          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
 6327                 int device_tagenb;
 6328 
 6329                 /*
 6330                  * If we are transitioning from tags to no-tags or
 6331                  * vice-versa, we need to carefully freeze and restart
 6332                  * the queue so that we don't overlap tagged and non-tagged
 6333                  * commands.  We also temporarily stop tags if there is
 6334                  * a change in transfer negotiation settings to allow
 6335                  * "tag-less" negotiation.
 6336                  */
 6337                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6338                  || (device->inq_flags & SID_CmdQue) != 0)
 6339                         device_tagenb = TRUE;
 6340                 else
 6341                         device_tagenb = FALSE;
 6342 
 6343                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
 6344                   && device_tagenb == FALSE)
 6345                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
 6346                   && device_tagenb == TRUE)) {
 6347 
 6348                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
 6349                                 /*
 6350                                  * Delay change to use tags until after a
 6351                                  * few commands have gone to this device so
 6352                                  * the controller has time to perform transfer
 6353                                  * negotiations without tagged messages getting
 6354                                  * in the way.
 6355                                  */
 6356                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
 6357                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
 6358                         } else {
 6359                                 struct ccb_relsim crs;
 6360 
 6361                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
 6362                                 device->inq_flags &= ~SID_CmdQue;
 6363                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
 6364                                                     sim->max_dev_openings);
 6365                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6366                                 device->tag_delay_count = 0;
 6367 
 6368                                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
 6369                                               /*priority*/1);
 6370                                 crs.ccb_h.func_code = XPT_REL_SIMQ;
 6371                                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6372                                 crs.openings
 6373                                     = crs.release_timeout 
 6374                                     = crs.qfrozen_cnt
 6375                                     = 0;
 6376                                 xpt_action((union ccb *)&crs);
 6377                         }
 6378                 }
 6379         }
 6380         if (async_update == FALSE) 
 6381                 (*(sim->sim_action))(sim, (union ccb *)cts);
 6382 }
 6383 
 6384 #else /* CAM_NEW_TRAN_CODE */
 6385 
 6386 static void
 6387 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
 6388                           int async_update)
 6389 {
 6390         struct  cam_sim *sim;
 6391         int     qfrozen;
 6392 
 6393         sim = cts->ccb_h.path->bus->sim;
 6394         if (async_update == FALSE) {
 6395                 struct  scsi_inquiry_data *inq_data;
 6396                 struct  ccb_pathinq cpi;
 6397                 struct  ccb_trans_settings cur_cts;
 6398 
 6399                 if (device == NULL) {
 6400                         cts->ccb_h.status = CAM_PATH_INVALID;
 6401                         xpt_done((union ccb *)cts);
 6402                         return;
 6403                 }
 6404 
 6405                 /*
 6406                  * Perform sanity checking against what the
 6407                  * controller and device can do.
 6408                  */
 6409                 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
 6410                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 6411                 xpt_action((union ccb *)&cpi);
 6412                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
 6413                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 6414                 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
 6415                 xpt_action((union ccb *)&cur_cts);
 6416                 inq_data = &device->inq_data;
 6417 
 6418                 /* Fill in any gaps in what the user gave us */
 6419                 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
 6420                         cts->sync_period = cur_cts.sync_period;
 6421                 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
 6422                         cts->sync_offset = cur_cts.sync_offset;
 6423                 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
 6424                         cts->bus_width = cur_cts.bus_width;
 6425                 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
 6426                         cts->flags &= ~CCB_TRANS_DISC_ENB;
 6427                         cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
 6428                 }
 6429                 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
 6430                         cts->flags &= ~CCB_TRANS_TAG_ENB;
 6431                         cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
 6432                 }
 6433 
 6434                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6435                   && (inq_data->flags & SID_Sync) == 0)
 6436                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
 6437                  || (cts->sync_offset == 0)
 6438                  || (cts->sync_period == 0)) {
 6439                         /* Force async */
 6440                         cts->sync_period = 0;
 6441                         cts->sync_offset = 0;
 6442                 } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6443                         && (inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
 6444                         && cts->sync_period <= 0x9) {
 6445                         /*
 6446                          * Don't allow DT transmission rates if the
 6447                          * device does not support it.
 6448                          */
 6449                         cts->sync_period = 0xa;
 6450                 }
 6451 
 6452                 switch (cts->bus_width) {
 6453                 case MSG_EXT_WDTR_BUS_32_BIT:
 6454                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6455                           || (inq_data->flags & SID_WBus32) != 0)
 6456                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
 6457                                 break;
 6458                         /* FALLTHROUGH to 16-bit */
 6459                 case MSG_EXT_WDTR_BUS_16_BIT:
 6460                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6461                           || (inq_data->flags & SID_WBus16) != 0)
 6462                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
 6463                                 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
 6464                                 break;
 6465                         }
 6466                         /* FALLTHROUGH to 8-bit */
 6467                 default: /* New bus width?? */
 6468                 case MSG_EXT_WDTR_BUS_8_BIT:
 6469                         /* All targets can do this */
 6470                         cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
 6471                         break;
 6472                 }
 6473 
 6474                 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
 6475                         /*
 6476                          * Can't tag queue without disconnection.
 6477                          */
 6478                         cts->flags &= ~CCB_TRANS_TAG_ENB;
 6479                         cts->valid |= CCB_TRANS_TQ_VALID;
 6480                 }
 6481 
 6482                 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
 6483                  || (inq_data->flags & SID_CmdQue) == 0
 6484                  || (device->queue_flags & SCP_QUEUE_DQUE) != 0
 6485                  || (device->quirk->mintags == 0)) {
 6486                         /*
 6487                          * Can't tag on hardware that doesn't support,
 6488                          * doesn't have it enabled, or has broken tag support.
 6489                          */
 6490                         cts->flags &= ~CCB_TRANS_TAG_ENB;
 6491                 }
 6492         }
 6493 
 6494         qfrozen = FALSE;
 6495         if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
 6496                 int device_tagenb;
 6497 
 6498                 /*
 6499                  * If we are transitioning from tags to no-tags or
 6500                  * vice-versa, we need to carefully freeze and restart
 6501                  * the queue so that we don't overlap tagged and non-tagged
 6502                  * commands.  We also temporarily stop tags if there is
 6503                  * a change in transfer negotiation settings to allow
 6504                  * "tag-less" negotiation.
 6505                  */
 6506                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6507                  || (device->inq_flags & SID_CmdQue) != 0)
 6508                         device_tagenb = TRUE;
 6509                 else
 6510                         device_tagenb = FALSE;
 6511 
 6512                 if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
 6513                   && device_tagenb == FALSE)
 6514                  || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
 6515                   && device_tagenb == TRUE)) {
 6516 
 6517                         if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
 6518                                 /*
 6519                                  * Delay change to use tags until after a
 6520                                  * few commands have gone to this device so
 6521                                  * the controller has time to perform transfer
 6522                                  * negotiations without tagged messages getting
 6523                                  * in the way.
 6524                                  */
 6525                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
 6526                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
 6527                         } else {
 6528                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
 6529                                 qfrozen = TRUE;
 6530                                 device->inq_flags &= ~SID_CmdQue;
 6531                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
 6532                                                     sim->max_dev_openings);
 6533                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6534                                 device->tag_delay_count = 0;
 6535                         }
 6536                 }
 6537         }
 6538 
 6539         if (async_update == FALSE) {
 6540                 /*
 6541                  * If we are currently performing tagged transactions to
 6542                  * this device and want to change its negotiation parameters,
 6543                  * go non-tagged for a bit to give the controller a chance to
 6544                  * negotiate unhampered by tag messages.
 6545                  */
 6546                 if ((device->inq_flags & SID_CmdQue) != 0
 6547                  && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
 6548                                    CCB_TRANS_SYNC_OFFSET_VALID|
 6549                                    CCB_TRANS_BUS_WIDTH_VALID)) != 0)
 6550                         xpt_toggle_tags(cts->ccb_h.path);
 6551 
 6552                 (*(sim->sim_action))(sim, (union ccb *)cts);
 6553         }
 6554 
 6555         if (qfrozen) {
 6556                 struct ccb_relsim crs;
 6557 
 6558                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
 6559                               /*priority*/1);
 6560                 crs.ccb_h.func_code = XPT_REL_SIMQ;
 6561                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6562                 crs.openings
 6563                     = crs.release_timeout 
 6564                     = crs.qfrozen_cnt
 6565                     = 0;
 6566                 xpt_action((union ccb *)&crs);
 6567         }
 6568 }
 6569 
 6570 
 6571 #endif /* CAM_NEW_TRAN_CODE */
 6572 
 6573 static void
 6574 xpt_toggle_tags(struct cam_path *path)
 6575 {
 6576         struct cam_ed *dev;
 6577 
 6578         /*
 6579          * Give controllers a chance to renegotiate
 6580          * before starting tag operations.  We
 6581          * "toggle" tagged queuing off then on
 6582          * which causes the tag enable command delay
 6583          * counter to come into effect.
 6584          */
 6585         dev = path->device;
 6586         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6587          || ((dev->inq_flags & SID_CmdQue) != 0
 6588           && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
 6589                 struct ccb_trans_settings cts;
 6590 
 6591                 xpt_setup_ccb(&cts.ccb_h, path, 1);
 6592 #ifdef CAM_NEW_TRAN_CODE
 6593                 cts.protocol = PROTO_SCSI;
 6594                 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
 6595                 cts.transport = XPORT_UNSPECIFIED;
 6596                 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
 6597                 cts.proto_specific.scsi.flags = 0;
 6598                 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
 6599 #else /* CAM_NEW_TRAN_CODE */
 6600                 cts.flags = 0;
 6601                 cts.valid = CCB_TRANS_TQ_VALID;
 6602 #endif /* CAM_NEW_TRAN_CODE */
 6603                 xpt_set_transfer_settings(&cts, path->device,
 6604                                           /*async_update*/TRUE);
 6605 #ifdef CAM_NEW_TRAN_CODE
 6606                 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
 6607 #else /* CAM_NEW_TRAN_CODE */
 6608                 cts.flags = CCB_TRANS_TAG_ENB;
 6609 #endif /* CAM_NEW_TRAN_CODE */
 6610                 xpt_set_transfer_settings(&cts, path->device,
 6611                                           /*async_update*/TRUE);
 6612         }
 6613 }
 6614 
 6615 static void
 6616 xpt_start_tags(struct cam_path *path)
 6617 {
 6618         struct ccb_relsim crs;
 6619         struct cam_ed *device;
 6620         struct cam_sim *sim;
 6621         int    newopenings;
 6622 
 6623         device = path->device;
 6624         sim = path->bus->sim;
 6625         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6626         xpt_freeze_devq(path, /*count*/1);
 6627         device->inq_flags |= SID_CmdQue;
 6628         newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
 6629         xpt_dev_ccbq_resize(path, newopenings);
 6630         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
 6631         crs.ccb_h.func_code = XPT_REL_SIMQ;
 6632         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6633         crs.openings
 6634             = crs.release_timeout 
 6635             = crs.qfrozen_cnt
 6636             = 0;
 6637         xpt_action((union ccb *)&crs);
 6638 }
 6639 
 6640 static int busses_to_config;
 6641 static int busses_to_reset;
 6642 
 6643 static int
 6644 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
 6645 {
 6646         if (bus->path_id != CAM_XPT_PATH_ID) {
 6647                 struct cam_path path;
 6648                 struct ccb_pathinq cpi;
 6649                 int can_negotiate;
 6650 
 6651                 busses_to_config++;
 6652                 xpt_compile_path(&path, NULL, bus->path_id,
 6653                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 6654                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 6655                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 6656                 xpt_action((union ccb *)&cpi);
 6657                 can_negotiate = cpi.hba_inquiry;
 6658                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
 6659                 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
 6660                  && can_negotiate)
 6661                         busses_to_reset++;
 6662                 xpt_release_path(&path);
 6663         }
 6664 
 6665         return(1);
 6666 }
 6667 
 6668 static int
 6669 xptconfigfunc(struct cam_eb *bus, void *arg)
 6670 {
 6671         struct  cam_path *path;
 6672         union   ccb *work_ccb;
 6673 
 6674         if (bus->path_id != CAM_XPT_PATH_ID) {
 6675                 cam_status status;
 6676                 int can_negotiate;
 6677 
 6678                 work_ccb = xpt_alloc_ccb();
 6679                 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
 6680                                               CAM_TARGET_WILDCARD,
 6681                                               CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
 6682                         printf("xptconfigfunc: xpt_create_path failed with "
 6683                                "status %#x for bus %d\n", status, bus->path_id);
 6684                         printf("xptconfigfunc: halting bus configuration\n");
 6685                         xpt_free_ccb(work_ccb);
 6686                         busses_to_config--;
 6687                         xpt_finishconfig(xpt_periph, NULL);
 6688                         return(0);
 6689                 }
 6690                 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
 6691                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 6692                 xpt_action(work_ccb);
 6693                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
 6694                         printf("xptconfigfunc: CPI failed on bus %d "
 6695                                "with status %d\n", bus->path_id,
 6696                                work_ccb->ccb_h.status);
 6697                         xpt_finishconfig(xpt_periph, work_ccb);
 6698                         return(1);
 6699                 }
 6700 
 6701                 can_negotiate = work_ccb->cpi.hba_inquiry;
 6702                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
 6703                 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
 6704                  && (can_negotiate != 0)) {
 6705                         xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
 6706                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
 6707                         work_ccb->ccb_h.cbfcnp = NULL;
 6708                         CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
 6709                                   ("Resetting Bus\n"));
 6710                         xpt_action(work_ccb);
 6711                         xpt_finishconfig(xpt_periph, work_ccb);
 6712                 } else {
 6713                         /* Act as though we performed a successful BUS RESET */
 6714                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
 6715                         xpt_finishconfig(xpt_periph, work_ccb);
 6716                 }
 6717         }
 6718 
 6719         return(1);
 6720 }
 6721 
 6722 static void
 6723 xpt_config(void *arg)
 6724 {
 6725         /*
 6726          * Now that interrupts are enabled, go find our devices
 6727          */
 6728 
 6729 #ifdef CAMDEBUG
 6730         /* Setup debugging flags and path */
 6731 #ifdef CAM_DEBUG_FLAGS
 6732         cam_dflags = CAM_DEBUG_FLAGS;
 6733 #else /* !CAM_DEBUG_FLAGS */
 6734         cam_dflags = CAM_DEBUG_NONE;
 6735 #endif /* CAM_DEBUG_FLAGS */
 6736 #ifdef CAM_DEBUG_BUS
 6737         if (cam_dflags != CAM_DEBUG_NONE) {
 6738                 if (xpt_create_path(&cam_dpath, xpt_periph,
 6739                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 6740                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 6741                         printf("xpt_config: xpt_create_path() failed for debug"
 6742                                " target %d:%d:%d, debugging disabled\n",
 6743                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 6744                         cam_dflags = CAM_DEBUG_NONE;
 6745                 }
 6746         } else
 6747                 cam_dpath = NULL;
 6748 #else /* !CAM_DEBUG_BUS */
 6749         cam_dpath = NULL;
 6750 #endif /* CAM_DEBUG_BUS */
 6751 #endif /* CAMDEBUG */
 6752 
 6753         /*
 6754          * Scan all installed busses.
 6755          */
 6756         xpt_for_all_busses(xptconfigbuscountfunc, NULL);
 6757 
 6758         if (busses_to_config == 0) {
 6759                 /* Call manually because we don't have any busses */
 6760                 xpt_finishconfig(xpt_periph, NULL);
 6761         } else  {
 6762                 if (busses_to_reset > 0 && scsi_delay >= 2000) {
 6763                         printf("Waiting %d seconds for SCSI "
 6764                                "devices to settle\n", scsi_delay/1000);
 6765                 }
 6766                 xpt_for_all_busses(xptconfigfunc, NULL);
 6767         }
 6768 }
 6769 
 6770 /*
 6771  * If the given device only has one peripheral attached to it, and if that
 6772  * peripheral is the passthrough driver, announce it.  This insures that the
 6773  * user sees some sort of announcement for every peripheral in their system.
 6774  */
 6775 static int
 6776 xptpassannouncefunc(struct cam_ed *device, void *arg)
 6777 {
 6778         struct cam_periph *periph;
 6779         int i;
 6780 
 6781         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 6782              periph = SLIST_NEXT(periph, periph_links), i++);
 6783 
 6784         periph = SLIST_FIRST(&device->periphs);
 6785         if ((i == 1)
 6786          && (strncmp(periph->periph_name, "pass", 4) == 0))
 6787                 xpt_announce_periph(periph, NULL);
 6788 
 6789         return(1);
 6790 }
 6791 
 6792 static void
 6793 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
 6794 {
 6795         struct  periph_driver **p_drv;
 6796         int     i;
 6797 
 6798         if (done_ccb != NULL) {
 6799                 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 6800                           ("xpt_finishconfig\n"));
 6801                 switch(done_ccb->ccb_h.func_code) {
 6802                 case XPT_RESET_BUS:
 6803                         if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
 6804                                 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
 6805                                 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
 6806                                 xpt_action(done_ccb);
 6807                                 return;
 6808                         }
 6809                         /* FALLTHROUGH */
 6810                 case XPT_SCAN_BUS:
 6811                 default:
 6812                         xpt_free_path(done_ccb->ccb_h.path);
 6813                         busses_to_config--;
 6814                         break;
 6815                 }
 6816         }
 6817 
 6818         if (busses_to_config == 0) {
 6819                 /* Register all the peripheral drivers */
 6820                 /* XXX This will have to change when we have loadable modules */
 6821                 p_drv = periph_drivers;
 6822                 for (i = 0; p_drv[i] != NULL; i++) {
 6823                         (*p_drv[i]->init)();
 6824                 }
 6825 
 6826                 /*
 6827                  * Check for devices with no "standard" peripheral driver
 6828                  * attached.  For any devices like that, announce the
 6829                  * passthrough driver so the user will see something.
 6830                  */
 6831                 xpt_for_all_devices(xptpassannouncefunc, NULL);
 6832 
 6833                 /* Release our hook so that the boot can continue. */
 6834                 config_intrhook_disestablish(xpt_config_hook);
 6835                 free(xpt_config_hook, M_TEMP);
 6836                 xpt_config_hook = NULL;
 6837         }
 6838         if (done_ccb != NULL)
 6839                 xpt_free_ccb(done_ccb);
 6840 }
 6841 
 6842 static void
 6843 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 6844 {
 6845         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 6846 
 6847         switch (work_ccb->ccb_h.func_code) {
 6848         /* Common cases first */
 6849         case XPT_PATH_INQ:              /* Path routing inquiry */
 6850         {
 6851                 struct ccb_pathinq *cpi;
 6852 
 6853                 cpi = &work_ccb->cpi;
 6854                 cpi->version_num = 1; /* XXX??? */
 6855                 cpi->hba_inquiry = 0;
 6856                 cpi->target_sprt = 0;
 6857                 cpi->hba_misc = 0;
 6858                 cpi->hba_eng_cnt = 0;
 6859                 cpi->max_target = 0;
 6860                 cpi->max_lun = 0;
 6861                 cpi->initiator_id = 0;
 6862                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 6863                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
 6864                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 6865                 cpi->unit_number = sim->unit_number;
 6866                 cpi->bus_id = sim->bus_id;
 6867                 cpi->base_transfer_speed = 0;
 6868 #ifdef CAM_NEW_TRAN_CODE
 6869                 cpi->protocol = PROTO_UNSPECIFIED;
 6870                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
 6871                 cpi->transport = XPORT_UNSPECIFIED;
 6872                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
 6873 #endif /* CAM_NEW_TRAN_CODE */
 6874                 cpi->ccb_h.status = CAM_REQ_CMP;
 6875                 xpt_done(work_ccb);
 6876                 break;
 6877         }
 6878         default:
 6879                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 6880                 xpt_done(work_ccb);
 6881                 break;
 6882         }
 6883 }
 6884 
 6885 /*
 6886  * The xpt as a "controller" has no interrupt sources, so polling
 6887  * is a no-op.
 6888  */
 6889 static void
 6890 xptpoll(struct cam_sim *sim)
 6891 {
 6892 }
 6893 
 6894 static void
 6895 camisr(void *V_queue)
 6896 {
 6897         cam_isrq_t *queue = V_queue;
 6898         int     s;
 6899         struct  ccb_hdr *ccb_h;
 6900 
 6901         s = splcam();
 6902         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
 6903                 int     runq;
 6904 
 6905                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
 6906                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 6907                 splx(s);
 6908 
 6909                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
 6910                           ("camisr\n"));
 6911 
 6912                 runq = FALSE;
 6913 
 6914                 if (ccb_h->flags & CAM_HIGH_POWER) {
 6915                         struct highpowerlist    *hphead;
 6916                         struct cam_ed           *device;
 6917                         union ccb               *send_ccb;
 6918 
 6919                         hphead = &highpowerq;
 6920 
 6921                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
 6922 
 6923                         /*
 6924                          * Increment the count since this command is done.
 6925                          */
 6926                         num_highpower++;
 6927 
 6928                         /* 
 6929                          * Any high powered commands queued up?
 6930                          */
 6931                         if (send_ccb != NULL) {
 6932                                 device = send_ccb->ccb_h.path->device;
 6933 
 6934                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
 6935 
 6936                                 xpt_release_devq(send_ccb->ccb_h.path,
 6937                                                  /*count*/1, /*runqueue*/TRUE);
 6938                         }
 6939                 }
 6940                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 6941                         struct cam_ed *dev;
 6942 
 6943                         dev = ccb_h->path->device;
 6944 
 6945                         s = splcam();
 6946                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 6947 
 6948                         ccb_h->path->bus->sim->devq->send_active--;
 6949                         ccb_h->path->bus->sim->devq->send_openings++;
 6950                         splx(s);
 6951                         
 6952                         if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 6953                           && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
 6954                          || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 6955                           && (dev->ccbq.dev_active == 0))) {
 6956                                 
 6957                                 xpt_release_devq(ccb_h->path, /*count*/1,
 6958                                                  /*run_queue*/TRUE);
 6959                         }
 6960 
 6961                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6962                          && (--dev->tag_delay_count == 0))
 6963                                 xpt_start_tags(ccb_h->path);
 6964 
 6965                         if ((dev->ccbq.queue.entries > 0)
 6966                          && (dev->qfrozen_cnt == 0)
 6967                          && (device_is_send_queued(dev) == 0)) {
 6968                                 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
 6969                                                               dev);
 6970                         }
 6971                 }
 6972 
 6973                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
 6974                         xpt_release_simq(ccb_h->path->bus->sim,
 6975                                          /*run_queue*/TRUE);
 6976                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
 6977                         runq = FALSE;
 6978                 } 
 6979 
 6980                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 6981                  && (ccb_h->status & CAM_DEV_QFRZN)) {
 6982                         xpt_release_devq(ccb_h->path, /*count*/1,
 6983                                          /*run_queue*/TRUE);
 6984                         ccb_h->status &= ~CAM_DEV_QFRZN;
 6985                 } else if (runq) {
 6986                         xpt_run_dev_sendq(ccb_h->path->bus);
 6987                 }
 6988 
 6989                 /* Call the peripheral driver's callback */
 6990                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 6991 
 6992                 /* Raise IPL for while test */
 6993                 s = splcam();
 6994         }
 6995         splx(s);
 6996 }

Cache object: ec55a7d165307679d40a04a23635f99c


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.