FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c


    1 /*
    2  * Implementation of the Common Access Method Transport (XPT) layer.
    3  *
    4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
    5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  * $FreeBSD$
   30  */
   31 #include <sys/param.h>
   32 #include <sys/systm.h>
   33 #include <sys/types.h>
   34 #include <sys/malloc.h>
   35 #include <sys/kernel.h>
   36 #include <sys/time.h>
   37 #include <sys/conf.h>
   38 #include <sys/fcntl.h>
   39 #include <sys/md5.h>
   40 #include <sys/devicestat.h>
   41 #include <sys/interrupt.h>
   42 #include <sys/bus.h>
   43 
   44 #ifdef PC98
   45 #include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
   46 #endif
   47 
   48 #include <machine/clock.h>
   49 #include <machine/ipl.h>
   50 
   51 #include <cam/cam.h>
   52 #include <cam/cam_ccb.h>
   53 #include <cam/cam_periph.h>
   54 #include <cam/cam_sim.h>
   55 #include <cam/cam_xpt.h>
   56 #include <cam/cam_xpt_sim.h>
   57 #include <cam/cam_xpt_periph.h>
   58 #include <cam/cam_debug.h>
   59 
   60 #include <cam/scsi/scsi_all.h>
   61 #include <cam/scsi/scsi_message.h>
   62 #include <cam/scsi/scsi_pass.h>
   63 #include <machine/stdarg.h>     /* for xpt_print below */
   64 #include "opt_cam.h"
   65 
    66 /* Data structures internal to the xpt layer */
   67 
   68 /*
   69  * Definition of an async handler callback block.  These are used to add
   70  * SIMs and peripherals to the async callback lists.
   71  */
   72 struct async_node {
   73         SLIST_ENTRY(async_node) links;
   74         u_int32_t       event_enable;   /* Async Event enables */
   75         void            (*callback)(void *arg, u_int32_t code,
   76                                     struct cam_path *path, void *args);
   77         void            *callback_arg;
   78 };
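       /*
        * event_enable is a bitmask of AC_* async event codes; a node's
        * callback is presumably invoked only for events whose code is set
        * in its event_enable mask (see xpt_async_bcast).
        */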
   79 
   80 SLIST_HEAD(async_list, async_node);
   81 SLIST_HEAD(periph_list, cam_periph);
   82 static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
   83 
   84 /*
   85  * This is the maximum number of high powered commands (e.g. start unit)
   86  * that can be outstanding at a particular time.
   87  */
   88 #ifndef CAM_MAX_HIGHPOWER
   89 #define CAM_MAX_HIGHPOWER  4
   90 #endif
   91 
   92 /* number of high powered commands that can go through right now */
   93 static int num_highpower = CAM_MAX_HIGHPOWER;
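       /*
        * Each high powered CCB issued presumably decrements num_highpower
        * and each completion increments it; once the count is exhausted,
        * further requests wait on highpowerq until a command finishes.
        */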
   94 
   95 /*
   96  * Structure for queueing a device in a run queue.
   97  * There is one run queue for allocating new ccbs,
   98  * and another for sending ccbs to the controller.
   99  */
  100 struct cam_ed_qinfo {
  101         cam_pinfo pinfo;
  102         struct    cam_ed *device;
  103 };
  104 
  105 /*
  106  * The CAM EDT (Existing Device Table) contains the device information for
  107  * all devices for all busses in the system.  The table contains a
  108  * cam_ed structure for each device on the bus.
  109  */
  110 struct cam_ed {
  111         TAILQ_ENTRY(cam_ed) links;
  112         struct  cam_ed_qinfo alloc_ccb_entry;
  113         struct  cam_ed_qinfo send_ccb_entry;
  114         struct  cam_et   *target;
  115         lun_id_t         lun_id;
  116         struct  camq drvq;              /*
  117                                          * Queue of type drivers wanting to do
  118                                          * work on this device.
  119                                          */
  120         struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
  121         struct  async_list asyncs;      /* Async callback info for this B/T/L */
  122         struct  periph_list periphs;    /* All attached devices */
  123         u_int   generation;             /* Generation number */
  124         struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
  125         struct  xpt_quirk_entry *quirk; /* Oddities about this device */
  126                                         /* Storage for the inquiry data */
  127         struct  scsi_inquiry_data inq_data;
  128         u_int8_t         inq_flags;     /*
  129                                          * Current settings for inquiry flags.
  130                                          * This allows us to override settings
  131                                          * like disconnection and tagged
  132                                          * queuing for a device.
  133                                          */
  134         u_int8_t         queue_flags;   /* Queue flags from the control page */
  135         u_int8_t         serial_num_len;
  136         u_int8_t         *serial_num;
  137         u_int32_t        qfrozen_cnt;
  138         u_int32_t        flags;
  139 #define CAM_DEV_UNCONFIGURED            0x01
  140 #define CAM_DEV_REL_TIMEOUT_PENDING     0x02
  141 #define CAM_DEV_REL_ON_COMPLETE         0x04
  142 #define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
  143 #define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
  144 #define CAM_DEV_TAG_AFTER_COUNT         0x20
  145 #define CAM_DEV_INQUIRY_DATA_VALID      0x40
  146         u_int32_t        tag_delay_count;
  147 #define CAM_TAG_DELAY_COUNT             5
  148         u_int32_t        refcount;
  149         struct           callout_handle c_handle;
  150 };
  151 
  152 /*
  153  * Each target is represented by an ET (Existing Target).  These
  154  * entries are created when a target is successfully probed with an
  155  * identify, and removed when a device fails to respond after a number
  156  * of retries, or a bus rescan finds the device missing.
  157  */
  158 struct cam_et { 
  159         TAILQ_HEAD(, cam_ed) ed_entries;
  160         TAILQ_ENTRY(cam_et) links;
  161         struct  cam_eb  *bus;   
  162         target_id_t     target_id;
  163         u_int32_t       refcount;       
  164         u_int           generation;
  165         struct          timeval last_reset;
  166 };
  167 
  168 /*
  169  * Each bus is represented by an EB (Existing Bus).  These entries
  170  * are created by calls to xpt_bus_register and deleted by calls to
  171  * xpt_bus_deregister.
  172  */
  173 struct cam_eb { 
  174         TAILQ_HEAD(, cam_et) et_entries;
  175         TAILQ_ENTRY(cam_eb)  links;
  176         path_id_t            path_id;
  177         struct cam_sim       *sim;
  178         struct timeval       last_reset;
  179         u_int32_t            flags;
  180 #define CAM_EB_RUNQ_SCHEDULED   0x01
  181         u_int32_t            refcount;
  182         u_int                generation;
  183 };
  184 
  185 struct cam_path {
  186         struct cam_periph *periph;
  187         struct cam_eb     *bus;
  188         struct cam_et     *target;
  189         struct cam_ed     *device;
  190 };
  191 
  192 struct xpt_quirk_entry {
  193         struct scsi_inquiry_pattern inq_pat;
  194         u_int8_t quirks;
  195 #define CAM_QUIRK_NOLUNS        0x01
  196 #define CAM_QUIRK_NOSERIAL      0x02
  197 #define CAM_QUIRK_HILUNS        0x04
  198         u_int mintags;
  199         u_int maxtags;
  200 };
  201 #define CAM_SCSI2_MAXLUN        8
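       /*
        * SCSI-2 provides for at most 8 logical units (0-7) per target, so
        * LUN probing normally stops at CAM_SCSI2_MAXLUN; CAM_QUIRK_HILUNS
        * presumably allows a device to be probed beyond that bound.
        */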
  202 
  203 typedef enum {
  204         XPT_FLAG_OPEN           = 0x01
  205 } xpt_flags;
  206 
  207 struct xpt_softc {
  208         xpt_flags       flags;
  209         u_int32_t       generation;
  210 };
  211 
  212 static const char quantum[] = "QUANTUM";
  213 static const char sony[] = "SONY";
  214 static const char west_digital[] = "WDIGTL";
  215 static const char samsung[] = "SAMSUNG";
  216 static const char seagate[] = "SEAGATE";
  217 static const char microp[] = "MICROP";
  218 
  219 static struct xpt_quirk_entry xpt_quirk_table[] = 
  220 {
  221         {
  222                 /* Reports QUEUE FULL for temporary resource shortages */
  223                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
  224                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  225         },
  226         {
  227                 /* Reports QUEUE FULL for temporary resource shortages */
  228                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
  229                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  230         },
  231         {
  232                 /* Reports QUEUE FULL for temporary resource shortages */
  233                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
  234                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  235         },
  236         {
  237                 /* Broken tagged queuing drive */
  238                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
   239                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  240         },
  241         {
  242                 /* Broken tagged queuing drive */
  243                 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
   244                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  245         },
  246         {
  247                 /* Broken tagged queuing drive */
  248                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
   249                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  250         },
  251         {
  252                 /*
  253                  * Unfortunately, the Quantum Atlas III has the same
  254                  * problem as the Atlas II drives above.
  255                  * Reported by: "Johan Granlund" <johan@granlund.nu>
  256                  *
  257                  * For future reference, the drive with the problem was:
  258                  * QUANTUM QM39100TD-SW N1B0
  259                  * 
  260                  * It's possible that Quantum will fix the problem in later
  261                  * firmware revisions.  If that happens, the quirk entry
  262                  * will need to be made specific to the firmware revisions
  263                  * with the problem.
  264                  * 
  265                  */
  266                 /* Reports QUEUE FULL for temporary resource shortages */
  267                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
  268                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  269         },
  270         {
  271                 /*
  272                  * 18 Gig Atlas III, same problem as the 9G version.
  273                  * Reported by: Andre Albsmeier
  274                  *              <andre.albsmeier@mchp.siemens.de>
  275                  *
  276                  * For future reference, the drive with the problem was:
  277                  * QUANTUM QM318000TD-S N491
  278                  */
  279                 /* Reports QUEUE FULL for temporary resource shortages */
  280                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
  281                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  282         },
  283         {
  284                 /*
  285                  * Broken tagged queuing drive
  286                  * Reported by: Bret Ford <bford@uop.cs.uop.edu>
  287                  *         and: Martin Renters <martin@tdc.on.ca>
  288                  */
  289                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
   290                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  291         },
  292                 /*
  293                  * The Seagate Medalist Pro drives have very poor write
  294                  * performance with anything more than 2 tags.
  295                  * 
  296                  * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
  297                  * Drive:  <SEAGATE ST36530N 1444>
  298                  *
  299                  * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
  300                  * Drive:  <SEAGATE ST34520W 1281>
  301                  *
  302                  * No one has actually reported that the 9G version
  303                  * (ST39140*) of the Medalist Pro has the same problem, but
  304                  * we're assuming that it does because the 4G and 6.5G
  305                  * versions of the drive are broken.
  306                  */
  307         {
  308                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
  309                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  310         },
  311         {
  312                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
  313                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  314         },
  315         {
  316                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
  317                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  318         },
  319         {
  320                 /*
  321                  * Slow when tagged queueing is enabled.  Write performance
  322                  * steadily drops off with more and more concurrent
  323                  * transactions.  Best sequential write performance with
  324                  * tagged queueing turned off and write caching turned on.
  325                  *
  326                  * PR:  kern/10398
  327                  * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
  328                  * Drive:  DCAS-34330 w/ "S65A" firmware.
  329                  *
   330                  * The drive with the problem had the "S65A" firmware
   331                  * revision; the problem has also been reported (by
   332                  * Stephen J. Roznowski <sjr@home.net>) for a drive
   333                  * with the "S61A" firmware revision.
  334                  *
  335                  * Although no one has reported problems with the 2 gig
  336                  * version of the DCAS drive, the assumption is that it
  337                  * has the same problems as the 4 gig version.  Therefore
   338                  * this quirk entry disables tagged queueing for all
  339                  * DCAS drives.
  340                  */
  341                 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
   342                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  343         },
  344         {
  345                 /* Broken tagged queuing drive */
  346                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
   347                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  348         },
  349         {
  350                 /* Broken tagged queuing drive */ 
  351                 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
   352                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  353         },
  354         {
  355                 /* Does not support other than LUN 0 */
  356                 { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
  357                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
  358         },
  359         {
  360                 /*
  361                  * Broken tagged queuing drive.
  362                  * Submitted by:
  363                  * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
  364                  * in PR kern/9535
  365                  */
  366                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
   367                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  368         },
  369         {
  370                 /*
  371                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
  372                  * 8MB/sec.)
  373                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
  374                  * Best performance with these drives is achieved with
  375                  * tagged queueing turned off, and write caching turned on.
  376                  */
  377                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
   378                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  379         },
  380         {
  381                 /*
  382                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
  383                  * 8MB/sec.)
  384                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
  385                  * Best performance with these drives is achieved with
  386                  * tagged queueing turned off, and write caching turned on.
  387                  */
  388                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
   389                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  390         },
  391         {
  392                 /*
  393                  * Doesn't handle queue full condition correctly,
  394                  * so we need to limit maxtags to what the device
  395                  * can handle instead of determining this automatically.
  396                  */
  397                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
  398                 /*quirks*/0, /*mintags*/2, /*maxtags*/32
  399         },
  400         {
  401                 /* Really only one LUN */
  402                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
   403                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  404         },
  405         {
  406                 /* I can't believe we need a quirk for DPT volumes. */
  407                 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
  408                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
  409                 /*mintags*/0, /*maxtags*/255
  410         },
  411         {
  412                 /*
  413                  * Many Sony CDROM drives don't like multi-LUN probing.
  414                  */
  415                 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
   416                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  417         },
  418         {
  419                 /*
  420                  * This drive doesn't like multiple LUN probing.
  421                  * Submitted by:  Parag Patel <parag@cgt.com>
  422                  */
  423                 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
   424                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  425         },
  426         {
  427                 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
   428                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  429         },
  430         {
  431                 /*
  432                  * The 8200 doesn't like multi-lun probing, and probably
   433                  * doesn't like serial number requests either.
  434                  */
  435                 {
  436                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
  437                         "EXB-8200*", "*"
  438                 },
   439                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  440         },
  441         {
  442                 /*
  443                  * Let's try the same as above, but for a drive that says
  444                  * it's an IPL-6860 but is actually an EXB 8200.
  445                  */
  446                 {
  447                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
  448                         "IPL-6860*", "*"
  449                 },
   450                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  451         },
  452         {
  453                 /*
  454                  * These Hitachi drives don't like multi-lun probing.
  455                  * The PR submitter has a DK319H, but says that the Linux
  456                  * kernel has a similar work-around for the DK312 and DK314,
  457                  * so all DK31* drives are quirked here.
  458                  * PR:            misc/18793
  459                  * Submitted by:  Paul Haddad <paul@pth.com>
  460                  */
  461                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
  462                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
  463         },
  464         {
  465                 /*
  466                  * This old revision of the TDC3600 is also SCSI-1, and
  467                  * hangs upon serial number probing.
  468                  */
  469                 {
  470                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
  471                         " TDC 3600", "U07:"
  472                 },
   473                 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
  474         },
  475         {
  476                 /*
   477                  * Would respond to all LUNs if asked for.
  478                  */
  479                 {
  480                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
  481                         "CP150", "*"
  482                 },
   483                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  484         },
  485         {
  486                 /*
   487                  * Would respond to all LUNs if asked for.
  488                  */
  489                 {
  490                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
  491                         "96X2*", "*"
  492                 },
   493                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  494         },
  495         {
  496                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
  497                 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
   498                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  499         },
  500         {
  501                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
  502                 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
   503                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  504         },
  505         {
  506                 /* TeraSolutions special settings for TRC-22 RAID */
  507                 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
  508                   /*quirks*/0, /*mintags*/55, /*maxtags*/255
  509         },
  510         {
  511                 /* Veritas Storage Appliance */
  512                 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
  513                   CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
  514         },
  515         {
  516                 /*
  517                  * Would respond to all LUNs.  Device type and removable
  518                  * flag are jumper-selectable.
  519                  */
  520                 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
  521                   "Tahiti 1", "*"
  522                 },
   523                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  524         },
  525         {
  526                 /* Default tagged queuing parameters for all devices */
  527                 {
  528                   T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
  529                   /*vendor*/"*", /*product*/"*", /*revision*/"*"
  530                 },
  531                 /*quirks*/0, /*mintags*/2, /*maxtags*/255
  532         },
  533 };
  534 
  535 static const int xpt_quirk_table_size =
  536         sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
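       /*
        * The table is searched first-fit (see xpt_find_quirk below), so more
        * specific patterns must precede the catch-all default entry above.
        * A new quirk goes ahead of that default; a hypothetical example:
        *
        *      {
        *              { T_DIRECT, SIP_MEDIA_FIXED, "ACME", "RR100*", "*" },
        *              CAM_QUIRK_NOLUNS, 0, 0
        *      },
        */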
  537 
  538 typedef enum {
  539         DM_RET_COPY             = 0x01,
  540         DM_RET_FLAG_MASK        = 0x0f,
  541         DM_RET_NONE             = 0x00,
  542         DM_RET_STOP             = 0x10,
  543         DM_RET_DESCEND          = 0x20,
  544         DM_RET_ERROR            = 0x30,
  545         DM_RET_ACTION_MASK      = 0xf0
  546 } dev_match_ret;
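       /*
        * The low nibble (DM_RET_FLAG_MASK) holds flags such as DM_RET_COPY;
        * the high nibble (DM_RET_ACTION_MASK) selects a single traversal
        * action: none, stop, descend, or error.
        */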
  547 
  548 typedef enum {
  549         XPT_DEPTH_BUS,
  550         XPT_DEPTH_TARGET,
  551         XPT_DEPTH_DEVICE,
  552         XPT_DEPTH_PERIPH
  553 } xpt_traverse_depth;
  554 
  555 struct xpt_traverse_config {
  556         xpt_traverse_depth      depth;
  557         void                    *tr_func;
  558         void                    *tr_arg;
  559 };
  560 
  561 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
  562 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
  563 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
  564 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
  565 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
  566 
  567 /* Transport layer configuration information */
  568 static struct xpt_softc xsoftc;
  569 
  570 /* Queues for our software interrupt handler */
  571 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
  572 static cam_isrq_t cam_bioq;
  573 static cam_isrq_t cam_netq;
  574 
  575 /* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
  576 static SLIST_HEAD(,ccb_hdr) ccb_freeq;
  577 static u_int xpt_max_ccbs;      /*
  578                                  * Maximum size of ccb pool.  Modified as
  579                                  * devices are added/removed or have their
  580                                  * opening counts changed.
  581                                  */
  582 static u_int xpt_ccb_count;     /* Current count of allocated ccbs */
  583 
  584 struct cam_periph *xpt_periph;
  585 
  586 static periph_init_t xpt_periph_init;
  587 
  588 static periph_init_t probe_periph_init;
  589 
  590 static struct periph_driver xpt_driver =
  591 {
  592         xpt_periph_init, "xpt",
  593         TAILQ_HEAD_INITIALIZER(xpt_driver.units)
  594 };
  595 
  596 static struct periph_driver probe_driver =
  597 {
  598         probe_periph_init, "probe",
  599         TAILQ_HEAD_INITIALIZER(probe_driver.units)
  600 };
  601 
  602 DATA_SET(periphdriver_set, xpt_driver);
  603 DATA_SET(periphdriver_set, probe_driver);
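       /*
        * DATA_SET() adds each driver to the periphdriver_set linker set;
        * xptioctl() below walks periphdriver_set.ls_items when resolving a
        * peripheral driver name for CAMGETPASSTHRU.
        */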
  604 
  605 #define XPT_CDEV_MAJOR 104
  606 
  607 static d_open_t xptopen;
  608 static d_close_t xptclose;
  609 static d_ioctl_t xptioctl;
  610 
  611 static struct cdevsw xpt_cdevsw = {
  612         /* open */      xptopen,
  613         /* close */     xptclose,
  614         /* read */      noread,
  615         /* write */     nowrite,
  616         /* ioctl */     xptioctl,
  617         /* poll */      nopoll,
  618         /* mmap */      nommap,
  619         /* strategy */  nostrategy,
  620         /* name */      "xpt",
  621         /* maj */       XPT_CDEV_MAJOR,
  622         /* dump */      nodump,
  623         /* psize */     nopsize,
  624         /* flags */     0,
  625         /* bmaj */      -1
  626 };
  627 
  628 static struct intr_config_hook *xpt_config_hook;
  629 
  630 /* Registered busses */
  631 static TAILQ_HEAD(,cam_eb) xpt_busses;
  632 static u_int bus_generation;
  633 
   634 /* Storage for debugging data structures */
  635 #ifdef  CAMDEBUG
  636 struct cam_path *cam_dpath;
  637 u_int32_t cam_dflags;
  638 u_int32_t cam_debug_delay;
  639 #endif
  640 
  641 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
  642 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
  643 #endif
  644 
  645 /*
  646  * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
   647  * enabled.  Also, the user must have either none or all of CAM_DEBUG_BUS,
  648  * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
  649  */
  650 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
  651     || defined(CAM_DEBUG_LUN)
  652 #ifdef CAMDEBUG
  653 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
  654     || !defined(CAM_DEBUG_LUN)
  655 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
  656         and CAM_DEBUG_LUN"
  657 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
  658 #else /* !CAMDEBUG */
  659 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
  660 #endif /* CAMDEBUG */
  661 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
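       /*
        * For example, a kernel config enabling compiled-in CAM debugging for
        * one B/T/L nexus might read (values chosen only for illustration):
        *
        *      options CAMDEBUG
        *      options CAM_DEBUG_BUS=0
        *      options CAM_DEBUG_TARGET=1
        *      options CAM_DEBUG_LUN=0
        *      options CAM_DEBUG_FLAGS=CAM_DEBUG_CDB
        */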
  662 
  663 /* Our boot-time initialization hook */
  664 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
  665 
  666 static moduledata_t cam_moduledata = {
  667         "cam",
  668         cam_module_event_handler,
  669         NULL
  670 };
  671 
  672 static void     xpt_init(void *);
  673 
  674 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
  675 MODULE_VERSION(cam, 1);
  676 
  677 
  678 static cam_status       xpt_compile_path(struct cam_path *new_path,
  679                                          struct cam_periph *perph,
  680                                          path_id_t path_id,
  681                                          target_id_t target_id,
  682                                          lun_id_t lun_id);
  683 
  684 static void             xpt_release_path(struct cam_path *path);
  685 
  686 static void             xpt_async_bcast(struct async_list *async_head,
  687                                         u_int32_t async_code,
  688                                         struct cam_path *path,
  689                                         void *async_arg);
  690 static void             xpt_dev_async(u_int32_t async_code,
  691                                       struct cam_eb *bus,
  692                                       struct cam_et *target,
  693                                       struct cam_ed *device,
  694                                       void *async_arg);
  695 static path_id_t xptnextfreepathid(void);
  696 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
  697 static union ccb *xpt_get_ccb(struct cam_ed *device);
  698 static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
  699                                   u_int32_t new_priority);
  700 static void      xpt_run_dev_allocq(struct cam_eb *bus);
  701 static void      xpt_run_dev_sendq(struct cam_eb *bus);
  702 static timeout_t xpt_release_devq_timeout;
  703 static timeout_t xpt_release_simq_timeout;
  704 static void      xpt_release_bus(struct cam_eb *bus);
  705 static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
  706                                          int run_queue);
  707 static struct cam_et*
  708                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
  709 static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
  710 static struct cam_ed*
  711                  xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
  712                                   lun_id_t lun_id);
  713 static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
  714                                     struct cam_ed *device);
  715 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
  716 static struct cam_eb*
  717                  xpt_find_bus(path_id_t path_id);
  718 static struct cam_et*
  719                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
  720 static struct cam_ed*
  721                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
  722 static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
  723 static void      xpt_scan_lun(struct cam_periph *periph,
  724                               struct cam_path *path, cam_flags flags,
  725                               union ccb *ccb);
  726 static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
  727 static xpt_busfunc_t    xptconfigbuscountfunc;
  728 static xpt_busfunc_t    xptconfigfunc;
  729 static void      xpt_config(void *arg);
  730 static xpt_devicefunc_t xptpassannouncefunc;
  731 static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
  732 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
  733 static void      xptpoll(struct cam_sim *sim);
  734 static swihand_t swi_camnet;
  735 static swihand_t swi_cambio;
  736 static void      camisr(cam_isrq_t *queue);
  737 #if 0
  738 static void      xptstart(struct cam_periph *periph, union ccb *work_ccb);
  739 static void      xptasync(struct cam_periph *periph,
   740                           u_int32_t code, struct cam_path *path);
  741 #endif
  742 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
  743                                     int num_patterns, struct cam_eb *bus);
  744 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
  745                                        int num_patterns, struct cam_ed *device);
  746 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
  747                                        int num_patterns,
  748                                        struct cam_periph *periph);
  749 static xpt_busfunc_t    xptedtbusfunc;
  750 static xpt_targetfunc_t xptedttargetfunc;
  751 static xpt_devicefunc_t xptedtdevicefunc;
  752 static xpt_periphfunc_t xptedtperiphfunc;
  753 static xpt_pdrvfunc_t   xptplistpdrvfunc;
  754 static xpt_periphfunc_t xptplistperiphfunc;
  755 static int              xptedtmatch(struct ccb_dev_match *cdm);
  756 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
  757 static int              xptbustraverse(struct cam_eb *start_bus,
  758                                        xpt_busfunc_t *tr_func, void *arg);
  759 static int              xpttargettraverse(struct cam_eb *bus,
  760                                           struct cam_et *start_target,
  761                                           xpt_targetfunc_t *tr_func, void *arg);
  762 static int              xptdevicetraverse(struct cam_et *target,
  763                                           struct cam_ed *start_device,
  764                                           xpt_devicefunc_t *tr_func, void *arg);
  765 static int              xptperiphtraverse(struct cam_ed *device,
  766                                           struct cam_periph *start_periph,
  767                                           xpt_periphfunc_t *tr_func, void *arg);
  768 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
  769                                         xpt_pdrvfunc_t *tr_func, void *arg);
  770 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
  771                                             struct cam_periph *start_periph,
  772                                             xpt_periphfunc_t *tr_func,
  773                                             void *arg);
  774 static xpt_busfunc_t    xptdefbusfunc;
  775 static xpt_targetfunc_t xptdeftargetfunc;
  776 static xpt_devicefunc_t xptdefdevicefunc;
  777 static xpt_periphfunc_t xptdefperiphfunc;
  778 static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
  779 #ifdef notusedyet
  780 static int              xpt_for_all_targets(xpt_targetfunc_t *tr_func,
  781                                             void *arg);
  782 #endif
  783 static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
  784                                             void *arg);
  785 #ifdef notusedyet
  786 static int              xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
  787                                             void *arg);
  788 #endif
  789 static xpt_devicefunc_t xptsetasyncfunc;
  790 static xpt_busfunc_t    xptsetasyncbusfunc;
  791 static cam_status       xptregister(struct cam_periph *periph,
  792                                     void *arg);
  793 static cam_status       proberegister(struct cam_periph *periph,
  794                                       void *arg);
  795 static void      probeschedule(struct cam_periph *probe_periph);
  796 static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
  797 static void      proberequestdefaultnegotiation(struct cam_periph *periph);
  798 static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
  799 static void      probecleanup(struct cam_periph *periph);
  800 static void      xpt_find_quirk(struct cam_ed *device);
  801 static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
  802                                            struct cam_ed *device,
  803                                            int async_update);
  804 static void      xpt_toggle_tags(struct cam_path *path);
  805 static void      xpt_start_tags(struct cam_path *path);
  806 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
  807                                             struct cam_ed *dev);
  808 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
  809                                            struct cam_ed *dev);
  810 static __inline int periph_is_queued(struct cam_periph *periph);
  811 static __inline int device_is_alloc_queued(struct cam_ed *device);
  812 static __inline int device_is_send_queued(struct cam_ed *device);
  813 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
  814 
  815 static __inline int
  816 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
  817 {
  818         int retval;
  819 
  820         if (dev->ccbq.devq_openings > 0) {
  821                 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
  822                         cam_ccbq_resize(&dev->ccbq,
  823                                         dev->ccbq.dev_openings
  824                                         + dev->ccbq.dev_active);
  825                         dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
  826                 }
  827                 /*
  828                  * The priority of a device waiting for CCB resources
   829                  * is that of the highest priority peripheral driver
  830                  * enqueued.
  831                  */
  832                 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
  833                                           &dev->alloc_ccb_entry.pinfo,
  834                                           CAMQ_GET_HEAD(&dev->drvq)->priority); 
  835         } else {
  836                 retval = 0;
  837         }
  838 
  839         return (retval);
  840 }
  841 
  842 static __inline int
  843 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
  844 {
  845         int     retval;
  846 
  847         if (dev->ccbq.dev_openings > 0) {
  848                 /*
  849                  * The priority of a device waiting for controller
   850                  * resources is that of the highest priority CCB
  851                  * enqueued.
  852                  */
  853                 retval =
  854                     xpt_schedule_dev(&bus->sim->devq->send_queue,
  855                                      &dev->send_ccb_entry.pinfo,
  856                                      CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
  857         } else {
  858                 retval = 0;
  859         }
  860         return (retval);
  861 }
  862 
  863 static __inline int
  864 periph_is_queued(struct cam_periph *periph)
  865 {
  866         return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
  867 }
  868 
  869 static __inline int
  870 device_is_alloc_queued(struct cam_ed *device)
  871 {
  872         return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  873 }
  874 
  875 static __inline int
  876 device_is_send_queued(struct cam_ed *device)
  877 {
  878         return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  879 }
  880 
  881 static __inline int
  882 dev_allocq_is_runnable(struct cam_devq *devq)
  883 {
  884         /*
  885          * Have work to do.
  886          * Have space to do more work.
  887          * Allowed to do work.
  888          */
  889         return ((devq->alloc_queue.qfrozen_cnt == 0)
  890              && (devq->alloc_queue.entries > 0)
  891              && (devq->alloc_openings > 0));
  892 }
  893 
  894 static void
  895 xpt_periph_init()
  896 {
  897         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
  898 }
  899 
  900 static void
  901 probe_periph_init()
  902 {
  903 }
  904 
  905 
  906 static void
  907 xptdone(struct cam_periph *periph, union ccb *done_ccb)
  908 {
  909         /* Caller will release the CCB */
  910         wakeup(&done_ccb->ccb_h.cbfcnp);
  911 }
  912 
  913 static int
  914 xptopen(dev_t dev, int flags, int fmt, struct proc *p)
  915 {
  916         int unit;
  917 
  918         unit = minor(dev) & 0xff;
  919 
  920         /*
  921          * Only allow read-write access.
  922          */
  923         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
  924                 return(EPERM);
  925 
  926         /*
  927          * We don't allow nonblocking access.
  928          */
  929         if ((flags & O_NONBLOCK) != 0) {
  930                 printf("xpt%d: can't do nonblocking access\n", unit);
  931                 return(ENODEV);
  932         }
  933 
  934         /*
  935          * We only have one transport layer right now.  If someone accesses
   936          * us via something other than minor number 0, point out their
  937          * mistake.
  938          */
  939         if (unit != 0) {
  940                 printf("xptopen: got invalid xpt unit %d\n", unit);
  941                 return(ENXIO);
  942         }
  943 
  944         /* Mark ourselves open */
  945         xsoftc.flags |= XPT_FLAG_OPEN;
  946         
  947         return(0);
  948 }
  949 
  950 static int
  951 xptclose(dev_t dev, int flag, int fmt, struct proc *p)
  952 {
  953         int unit;
  954 
  955         unit = minor(dev) & 0xff;
  956 
  957         /*
  958          * We only have one transport layer right now.  If someone accesses
   959          * us via something other than minor number 0, point out their
  960          * mistake.
  961          */
  962         if (unit != 0) {
  963                 printf("xptclose: got invalid xpt unit %d\n", unit);
  964                 return(ENXIO);
  965         }
  966 
  967         /* Mark ourselves closed */
  968         xsoftc.flags &= ~XPT_FLAG_OPEN;
  969 
  970         return(0);
  971 }
  972 
  973 static int
  974 xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
  975 {
  976         int unit, error;
  977 
  978         error = 0;
  979         unit = minor(dev) & 0xff;
  980 
  981         /*
  982          * We only have one transport layer right now.  If someone accesses
   983          * us via something other than minor number 0, point out their
  984          * mistake.
  985          */
  986         if (unit != 0) {
  987                 printf("xptioctl: got invalid xpt unit %d\n", unit);
  988                 return(ENXIO);
  989         }
  990 
  991         switch(cmd) {
  992         /*
  993          * For the transport layer CAMIOCOMMAND ioctl, we really only want
  994          * to accept CCB types that don't quite make sense to send through a
  995          * passthrough driver.
  996          */
  997         case CAMIOCOMMAND: {
  998                 union ccb *ccb;
  999                 union ccb *inccb;
 1000 
 1001                 inccb = (union ccb *)addr;
 1002 
 1003                 switch(inccb->ccb_h.func_code) {
 1004                 case XPT_SCAN_BUS:
 1005                 case XPT_RESET_BUS:
 1006                         if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
 1007                          || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
 1008                                 error = EINVAL;
 1009                                 break;
 1010                         }
 1011                         /* FALLTHROUGH */
 1012                 case XPT_PATH_INQ:
 1013                 case XPT_ENG_INQ:
 1014                 case XPT_SCAN_LUN:
 1015 
 1016                         ccb = xpt_alloc_ccb();
 1017 
 1018                         /*
 1019                          * Create a path using the bus, target, and lun the
 1020                          * user passed in.
 1021                          */
 1022                         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
 1023                                             inccb->ccb_h.path_id,
 1024                                             inccb->ccb_h.target_id,
 1025                                             inccb->ccb_h.target_lun) !=
 1026                                             CAM_REQ_CMP){
 1027                                 error = EINVAL;
 1028                                 xpt_free_ccb(ccb);
 1029                                 break;
 1030                         }
 1031                         /* Ensure all of our fields are correct */
 1032                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
 1033                                       inccb->ccb_h.pinfo.priority);
 1034                         xpt_merge_ccb(ccb, inccb);
 1035                         ccb->ccb_h.cbfcnp = xptdone;
 1036                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
 1037                         bcopy(ccb, inccb, sizeof(union ccb));
 1038                         xpt_free_path(ccb->ccb_h.path);
 1039                         xpt_free_ccb(ccb);
 1040                         break;
 1041 
 1042                 case XPT_DEBUG: {
 1043                         union ccb ccb;
 1044 
 1045                         /*
 1046                          * This is an immediate CCB, so it's okay to
 1047                          * allocate it on the stack.
 1048                          */
 1049 
 1050                         /*
 1051                          * Create a path using the bus, target, and lun the
 1052                          * user passed in.
 1053                          */
 1054                         if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
 1055                                             inccb->ccb_h.path_id,
 1056                                             inccb->ccb_h.target_id,
 1057                                             inccb->ccb_h.target_lun) !=
 1058                                             CAM_REQ_CMP){
 1059                                 error = EINVAL;
 1060                                 break;
 1061                         }
 1062                         /* Ensure all of our fields are correct */
 1063                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
 1064                                       inccb->ccb_h.pinfo.priority);
 1065                         xpt_merge_ccb(&ccb, inccb);
 1066                         ccb.ccb_h.cbfcnp = xptdone;
 1067                         xpt_action(&ccb);
 1068                         bcopy(&ccb, inccb, sizeof(union ccb));
 1069                         xpt_free_path(ccb.ccb_h.path);
 1070                         break;
 1071 
 1072                 }
 1073                 case XPT_DEV_MATCH: {
 1074                         struct cam_periph_map_info mapinfo;
 1075                         struct cam_path *old_path;
 1076 
 1077                         /*
 1078                          * We can't deal with physical addresses for this
 1079                          * type of transaction.
 1080                          */
 1081                         if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
 1082                                 error = EINVAL;
 1083                                 break;
 1084                         }
 1085 
 1086                         /*
 1087                          * Save this in case the caller had it set to
 1088                          * something in particular.
 1089                          */
 1090                         old_path = inccb->ccb_h.path;
 1091 
 1092                         /*
 1093                          * We really don't need a path for the matching
 1094                          * code.  The path is needed because of the
 1095                          * debugging statements in xpt_action().  They
 1096                          * assume that the CCB has a valid path.
 1097                          */
 1098                         inccb->ccb_h.path = xpt_periph->path;
 1099 
 1100                         bzero(&mapinfo, sizeof(mapinfo));
 1101 
 1102                         /*
 1103                          * Map the pattern and match buffers into kernel
 1104                          * virtual address space.
 1105                          */
 1106                         error = cam_periph_mapmem(inccb, &mapinfo);
 1107 
 1108                         if (error) {
 1109                                 inccb->ccb_h.path = old_path;
 1110                                 break;
 1111                         }
 1112 
 1113                         /*
 1114                          * This is an immediate CCB, we can send it on directly.
 1115                          */
 1116                         xpt_action(inccb);
 1117 
 1118                         /*
 1119                          * Map the buffers back into user space.
 1120                          */
 1121                         cam_periph_unmapmem(inccb, &mapinfo);
 1122 
 1123                         inccb->ccb_h.path = old_path;
 1124 
 1125                         error = 0;
 1126                         break;
 1127                 }
 1128                 default:
 1129                         error = ENOTSUP;
 1130                         break;
 1131                 }
 1132                 break;
 1133         }
 1134         /*
  1135          * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
  1136          * input, with the peripheral driver name and unit number filled in.
  1137          * The other fields don't really matter as input.  The passthrough
  1138          * driver name ("pass") and unit number are passed back in the ccb.
  1139          * The current device generation number, the index into the device
  1140          * peripheral driver list, and the status are also passed back.  Note that
 1141          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
 1142          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
 1143          * (or rather should be) impossible for the device peripheral driver
 1144          * list to change since we look at the whole thing in one pass, and
 1145          * we do it with splcam protection.
 1146          * 
 1147          */
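               /*
                * A minimal userland sketch of this ioctl (hypothetical
                * lookup of "da1"; error handling omitted):
                *
                *      union ccb ccb;
                *
                *      bzero(&ccb, sizeof(ccb));
                *      strcpy(ccb.cgdl.periph_name, "da");
                *      ccb.cgdl.unit_number = 1;
                *      ioctl(fd, CAMGETPASSTHRU, &ccb);
                *
                * On success, ccb.cgdl.periph_name and unit_number name the
                * "pass" instance attached to the same device.
                */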
 1148         case CAMGETPASSTHRU: {
 1149                 union ccb *ccb;
 1150                 struct cam_periph *periph;
 1151                 struct periph_driver **p_drv;
 1152                 char   *name;
 1153                 int unit;
 1154                 int cur_generation;
 1155                 int base_periph_found;
 1156                 int splbreaknum;
 1157                 int s;
 1158 
 1159                 ccb = (union ccb *)addr;
 1160                 unit = ccb->cgdl.unit_number;
 1161                 name = ccb->cgdl.periph_name;
 1162                 /*
 1163                  * Every 100 devices, we want to drop our spl protection to
 1164                  * give the software interrupt handler a chance to run.
 1165                  * Most systems won't run into this check, but this should
 1166                  * avoid starvation in the software interrupt handler in
 1167                  * large systems.
 1168                  */
 1169                 splbreaknum = 100;
 1170 
 1171                 ccb = (union ccb *)addr;
 1172 
 1173                 base_periph_found = 0;
 1174 
 1175                 /*
 1176                  * Sanity check -- make sure we don't get a null peripheral
 1177                  * driver name.
 1178                  */
 1179                 if (*ccb->cgdl.periph_name == '\0') {
 1180                         error = EINVAL;
 1181                         break;
 1182                 }
 1183 
 1184                 /* Keep the list from changing while we traverse it */
 1185                 s = splcam();
 1186 ptstartover:
 1187                 cur_generation = xsoftc.generation;
 1188 
 1189                 /* first find our driver in the list of drivers */
 1190                 for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
 1191                      *p_drv != NULL; p_drv++)
 1192                         if (strcmp((*p_drv)->driver_name, name) == 0)
 1193                                 break;
 1194 
 1195                 if (*p_drv == NULL) {
 1196                         splx(s);
 1197                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1198                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1199                         *ccb->cgdl.periph_name = '\0';
 1200                         ccb->cgdl.unit_number = 0;
 1201                         error = ENOENT;
 1202                         break;
 1203                 }       
 1204 
 1205                 /*
 1206                  * Run through every peripheral instance of this driver
 1207                  * and check to see whether it matches the unit passed
 1208                  * in by the user.  If it does, get out of the loops and
 1209                  * find the passthrough driver associated with that
 1210                  * peripheral driver.
 1211                  */
 1212                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
 1213                      periph = TAILQ_NEXT(periph, unit_links)) {
 1214 
 1215                         if (periph->unit_number == unit) {
 1216                                 break;
 1217                         } else if (--splbreaknum == 0) {
 1218                                 splx(s);
 1219                                 s = splcam();
 1220                                 splbreaknum = 100;
 1221                                 if (cur_generation != xsoftc.generation)
 1222                                        goto ptstartover;
 1223                         }
 1224                 }
 1225                 /*
 1226                  * If we found the peripheral driver that the user passed
 1227                  * in, go through all of the peripheral drivers for that
 1228                  * particular device and look for a passthrough driver.
 1229                  */
 1230                 if (periph != NULL) {
 1231                         struct cam_ed *device;
 1232                         int i;
 1233 
 1234                         base_periph_found = 1;
 1235                         device = periph->path->device;
 1236                         for (i = 0, periph = device->periphs.slh_first;
 1237                              periph != NULL;
 1238                              periph = periph->periph_links.sle_next, i++) {
 1239                                 /*
 1240                                  * Check to see whether we have a
 1241                                  * passthrough device or not. 
 1242                                  */
 1243                                 if (strcmp(periph->periph_name, "pass") == 0) {
 1244                                         /*
 1245                                          * Fill in the getdevlist fields.
 1246                                          */
 1247                                         strcpy(ccb->cgdl.periph_name,
 1248                                                periph->periph_name);
 1249                                         ccb->cgdl.unit_number =
 1250                                                 periph->unit_number;
 1251                                         if (SLIST_NEXT(periph, periph_links))
 1252                                                 ccb->cgdl.status =
 1253                                                         CAM_GDEVLIST_MORE_DEVS;
 1254                                         else
 1255                                                 ccb->cgdl.status =
 1256                                                        CAM_GDEVLIST_LAST_DEVICE;
 1257                                         ccb->cgdl.generation =
 1258                                                 device->generation;
 1259                                         ccb->cgdl.index = i;
 1260                                         /*
 1261                                          * Fill in some CCB header fields
 1262                                          * that the user may want.
 1263                                          */
 1264                                         ccb->ccb_h.path_id =
 1265                                                 periph->path->bus->path_id;
 1266                                         ccb->ccb_h.target_id =
 1267                                                 periph->path->target->target_id;
 1268                                         ccb->ccb_h.target_lun =
 1269                                                 periph->path->device->lun_id;
 1270                                         ccb->ccb_h.status = CAM_REQ_CMP;
 1271                                         break;
 1272                                 }
 1273                         }
 1274                 }
 1275 
 1276                 /*
 1277                  * If the periph is null here, one of two things has
 1278                  * happened.  The first possibility is that we couldn't
 1279                  * find the unit number of the particular peripheral driver
 1280                  * that the user is asking about.  e.g. the user asks for
 1281                  * the passthrough driver for "da11".  We find the list of
 1282                  * "da" peripherals all right, but there is no unit 11.
 1283                  * The other possibility is that we went through the list
 1284                  * of peripheral drivers attached to the device structure,
 1285                  * but didn't find one with the name "pass".  Either way,
 1286                  * we return ENOENT, since we couldn't find something.
 1287                  */
 1288                 if (periph == NULL) {
 1289                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1290                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1291                         *ccb->cgdl.periph_name = '\0';
 1292                         ccb->cgdl.unit_number = 0;
 1293                         error = ENOENT;
 1294                         /*
 1295                          * It is unfortunate that this is even necessary,
 1296                          * but there are many, many clueless users out there.
 1297                          * If base_periph_found is true, the user found the
 1298                          * base peripheral driver, but doesn't have the
 1299                          * passthrough driver for it in his kernel.
 1300                          */
 1301                         if (base_periph_found == 1) {
 1302                                 printf("xptioctl: pass driver is not in the "
 1303                                        "kernel\n");
 1304                                 printf("xptioctl: put \"device pass0\" in "
 1305                                        "your kernel config file\n");
 1306                         }
 1307                 }
 1308                 splx(s);
 1309                 break;
 1310                 }
 1311         default:
 1312                 error = ENOTTY;
 1313                 break;
 1314         }
 1315 
 1316         return(error);
 1317 }
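
      /*
       * Illustrative sketch (not part of this file): how a userland
       * consumer might drive the CAMGETPASSTHRU case above, assuming the
       * usual /dev/xpt0 transport device node.  This asks the XPT which
       * pass(4) instance sits on top of, for example, "da" unit 1:
       *
       *      union ccb ccb;
       *      int fd = open("/dev/xpt0", O_RDWR);
       *
       *      bzero(&ccb, sizeof(ccb));
       *      ccb.ccb_h.func_code = XPT_GDEVLIST;
       *      strncpy(ccb.cgdl.periph_name, "da", DEV_IDLEN);
       *      ccb.cgdl.unit_number = 1;
       *      if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0
       *       && ccb.ccb_h.status == CAM_REQ_CMP)
       *              printf("pass device: %s%d\n", ccb.cgdl.periph_name,
       *                     ccb.cgdl.unit_number);
       *
       * On success the handler above overwrites periph_name/unit_number
       * with those of the matching pass(4) instance.
       */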
 1318 
 1319 static int
 1320 cam_module_event_handler(module_t mod, int what, void *arg)
 1321 {
 1322         if (what == MOD_LOAD) {
 1323                 xpt_init(NULL);
 1324         } else if (what == MOD_UNLOAD) {
 1325                 return EBUSY;
 1326         }
 1327 
 1328         return 0;
 1329 }
 1330 
 1331 /* Functions accessed by the peripheral drivers */
 1332 static void
 1333 xpt_init(void *dummy)
 1334 {
 1336         struct cam_sim *xpt_sim;
 1337         struct cam_path *path;
 1338         struct cam_devq *devq;
 1339         cam_status status;
 1340 
 1341         TAILQ_INIT(&xpt_busses);
 1342         TAILQ_INIT(&cam_bioq);
 1343         TAILQ_INIT(&cam_netq);
 1344         SLIST_INIT(&ccb_freeq);
 1345         STAILQ_INIT(&highpowerq);
 1346 
 1347         /*
 1348          * The xpt layer is, itself, the equivalent of a SIM.
 1349          * Allow 16 ccbs in the ccb pool for it.  This should
 1350          * give decent parallelism when we probe busses and
 1351          * perform other XPT functions.
 1352          */
 1353         devq = cam_simq_alloc(16);
 1354         xpt_sim = cam_sim_alloc(xptaction,
 1355                                 xptpoll,
 1356                                 "xpt",
 1357                                 /*softc*/NULL,
 1358                                 /*unit*/0,
 1359                                 /*max_dev_transactions*/0,
 1360                                 /*max_tagged_dev_transactions*/0,
 1361                                 devq);
 1362         xpt_max_ccbs = 16;
 1363                                 
 1364         xpt_bus_register(xpt_sim, /*bus #*/0);
 1365 
 1366         /*
 1367          * Looking at the XPT from the SIM layer, the XPT is
 1368          * the equivalent of a peripheral driver.  Allocate
 1369          * a peripheral driver entry for us.
 1370          */
 1371         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
 1372                                       CAM_TARGET_WILDCARD,
 1373                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
 1374                 printf("xpt_init: xpt_create_path failed with status %#x,"
 1375                        " failing attach\n", status);
 1376                 return;
 1377         }
 1378 
 1379         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
 1380                          path, NULL, 0, NULL);
 1381         xpt_free_path(path);
 1382 
 1383         xpt_sim->softc = xpt_periph;
 1384 
 1385         /*
 1386          * Register a callback for when interrupts are enabled.
 1387          */
 1388         xpt_config_hook =
 1389             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
 1390                                               M_TEMP, M_NOWAIT | M_ZERO);
 1391         if (xpt_config_hook == NULL) {
 1392                 printf("xpt_init: Cannot malloc config hook "
 1393                        "- failing attach\n");
 1394                 return;
 1395         }
 1396 
 1397         xpt_config_hook->ich_func = xpt_config;
 1398         if (config_intrhook_establish(xpt_config_hook) != 0) {
 1399                 free(xpt_config_hook, M_TEMP);
 1400                 printf("xpt_init: config_intrhook_establish failed "
 1401                        "- failing attach\n");
 1402         }
 1403 
 1404         /* Install our software interrupt handlers */
 1405         register_swi(SWI_CAMNET, swi_camnet);
 1406         register_swi(SWI_CAMBIO, swi_cambio);
 1407 }
 1408 
 1409 static cam_status
 1410 xptregister(struct cam_periph *periph, void *arg)
 1411 {
 1412         if (periph == NULL) {
 1413                 printf("xptregister: periph was NULL!!\n");
 1414                 return(CAM_REQ_CMP_ERR);
 1415         }
 1416 
 1417         periph->softc = NULL;
 1418 
 1419         xpt_periph = periph;
 1420 
 1421         return(CAM_REQ_CMP);
 1422 }
 1423 
 1424 int32_t
 1425 xpt_add_periph(struct cam_periph *periph)
 1426 {
 1427         struct cam_ed *device;
 1428         int32_t  status;
 1429 
 1430         device = periph->path->device;
 1431 
 1432         status = CAM_REQ_CMP;
 1433 
 1434         if (device != NULL) {
 1435                 int s;
 1436                 struct periph_list *periph_head;
 1437 
 1438                 periph_head = &device->periphs;
 1439 
 1440                 /*
 1441                  * Make room for this peripheral
 1442                  * so it will fit in the queue
 1443                  * when it's scheduled to run
 1444                  */
 1445                 s = splsoftcam();
 1446                 status = camq_resize(&device->drvq,
 1447                                      device->drvq.array_size + 1);
 1448 
 1449                 device->generation++;
 1450 
 1451                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
 1452 
 1453                 splx(s);
 1454         }
 1455 
 1456         xsoftc.generation++;
 1457 
 1458         return (status);
 1459 }
 1460 
 1461 void
 1462 xpt_remove_periph(struct cam_periph *periph)
 1463 {
 1464         struct cam_ed *device;
 1465 
 1466         device = periph->path->device;
 1467 
 1468         if (device != NULL) {
 1469                 int s;
 1470                 struct periph_list *periph_head;
 1471 
 1472                 periph_head = &device->periphs;
 1473                 
 1474                 /* Release the slot for this peripheral */
 1475                 s = splsoftcam();
 1476                 camq_resize(&device->drvq, device->drvq.array_size - 1);
 1477 
 1478                 device->generation++;
 1479 
 1480                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
 1481 
 1482                 splx(s);
 1483         }
 1484 
 1485         xsoftc.generation++;
 1486 
 1487 }
 1488 
 1489 void
 1490 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1491 {
 1492         int s;
 1493         u_int mb;
 1494         struct cam_path *path;
 1495         struct ccb_trans_settings cts;
 1496 
 1497         path = periph->path;
 1498         /*
 1499          * To ensure that this is printed in one piece,
 1500          * mask out CAM interrupts.
 1501          */
 1502         s = splsoftcam();
 1503         printf("%s%d at %s%d bus %d target %d lun %d\n",
 1504                periph->periph_name, periph->unit_number,
 1505                path->bus->sim->sim_name,
 1506                path->bus->sim->unit_number,
 1507                path->bus->sim->bus_id,
 1508                path->target->target_id,
 1509                path->device->lun_id);
 1510         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1511         scsi_print_inquiry(&path->device->inq_data);
 1512         if ((bootverbose)
 1513          && (path->device->serial_num_len > 0)) {
 1514                 /* Don't wrap the screen  - print only the first 60 chars */
 1515                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1516                        periph->unit_number, path->device->serial_num);
 1517         }
 1518         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 1519         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 1520         cts.flags = CCB_TRANS_CURRENT_SETTINGS;
 1521         xpt_action((union ccb*)&cts);
 1522         if (cts.ccb_h.status == CAM_REQ_CMP) {
 1523                 u_int speed;
 1524                 u_int freq;
 1525 
 1526                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1527                   && cts.sync_offset != 0) {
 1528                         freq = scsi_calc_syncsrate(cts.sync_period);
 1529                         speed = freq;
 1530                 } else {
 1531                         struct ccb_pathinq cpi;
 1532 
 1533                         /* Ask the SIM for its base transfer speed */
 1534                         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 1535                         cpi.ccb_h.func_code = XPT_PATH_INQ;
 1536                         xpt_action((union ccb *)&cpi);
 1537 
 1538                         speed = cpi.base_transfer_speed;
 1539                         freq = 0;
 1540                 }
 1541                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
 1542                         speed *= (0x01 << cts.bus_width);
 1543                 mb = speed / 1000;
 1544                 if (mb > 0)
 1545                         printf("%s%d: %d.%03dMB/s transfers",
 1546                                periph->periph_name, periph->unit_number,
 1547                                mb, speed % 1000);
 1548                 else
 1549                         printf("%s%d: %dKB/s transfers", periph->periph_name,
 1550                                periph->unit_number, speed);
 1551                 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1552                  && cts.sync_offset != 0) {
 1553                         printf(" (%d.%03dMHz, offset %d", freq / 1000,
 1554                                freq % 1000, cts.sync_offset);
 1555                 }
 1556                 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
 1557                  && cts.bus_width > 0) {
 1558                         if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1559                          && cts.sync_offset != 0) {
 1560                                 printf(", ");
 1561                         } else {
 1562                                 printf(" (");
 1563                         }
 1564                         printf("%dbit)", 8 * (0x01 << cts.bus_width));
 1565                 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
 1566                         && cts.sync_offset != 0) {
 1567                         printf(")");
 1568                 }
 1569 
 1570                 if (path->device->inq_flags & SID_CmdQue
 1571                  || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1572                         printf(", Tagged Queueing Enabled");
 1573                 }
 1574 
 1575                 printf("\n");
 1576         } else if (path->device->inq_flags & SID_CmdQue
 1577                 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1578                 printf("%s%d: Tagged Queueing Enabled\n",
 1579                        periph->periph_name, periph->unit_number);
 1580         }
 1581 
 1582         /*
 1583          * Only print the caller's announce string if one was
 1584          * passed in.
 1585          */
 1586         if (announce_string != NULL)
 1587                 printf("%s%d: %s\n", periph->periph_name,
 1588                        periph->unit_number, announce_string);
 1589         splx(s);
 1590 }
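
      /*
       * A worked example of the transfer-speed arithmetic above, assuming
       * an Ultra2 target (sync period factor 10, which
       * scsi_calc_syncsrate() converts to 40000KHz) on a wide bus
       * (bus_width 1):
       *
       *      freq  = 40000;
       *      speed = 40000 << 1 = 80000;     (KB/s)
       *      mb    = 80000 / 1000 = 80;
       *
       * so the announcement reads "80.000MB/s transfers (40.000MHz,
       * offset N, 16bit)".
       */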
 1591 
 1592 
 1593 static dev_match_ret
 1594 xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
 1595             struct cam_eb *bus)
 1596 {
 1597         dev_match_ret retval;
 1598         int i;
 1599 
 1600         retval = DM_RET_NONE;
 1601 
 1602         /*
 1603          * If we aren't given something to match against, that's an error.
 1604          */
 1605         if (bus == NULL)
 1606                 return(DM_RET_ERROR);
 1607 
 1608         /*
 1609          * If there are no match entries, then this bus matches no
 1610          * matter what.
 1611          */
 1612         if ((patterns == NULL) || (num_patterns == 0))
 1613                 return(DM_RET_DESCEND | DM_RET_COPY);
 1614 
 1615         for (i = 0; i < num_patterns; i++) {
 1616                 struct bus_match_pattern *cur_pattern;
 1617 
 1618                 /*
 1619                  * If the pattern in question isn't for a bus node, we
 1620                  * aren't interested.  However, we do indicate to the
 1621                  * calling routine that we should continue descending the
 1622                  * tree, since the user wants to match against lower-level
 1623                  * EDT elements.
 1624                  */
 1625                 if (patterns[i].type != DEV_MATCH_BUS) {
 1626                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1627                                 retval |= DM_RET_DESCEND;
 1628                         continue;
 1629                 }
 1630 
 1631                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1632 
 1633                 /*
 1634                  * If they want to match any bus node, we give them any
 1635                  * device node.
 1636                  */
 1637                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1638                         /* set the copy flag */
 1639                         retval |= DM_RET_COPY;
 1640 
 1641                         /*
 1642                          * If we've already decided on an action, go ahead
 1643                          * and return.
 1644                          */
 1645                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1646                                 return(retval);
 1647                 }
 1648 
 1649                 /*
 1650                  * Not sure why someone would do this...
 1651                  */
 1652                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1653                         continue;
 1654 
 1655                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1656                  && (cur_pattern->path_id != bus->path_id))
 1657                         continue;
 1658 
 1659                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1660                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1661                         continue;
 1662 
 1663                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1664                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1665                         continue;
 1666 
 1667                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1668                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1669                              DEV_IDLEN) != 0))
 1670                         continue;
 1671 
 1672                 /*
 1673                  * If we get to this point, the user definitely wants 
 1674                  * information on this bus.  So tell the caller to copy the
 1675                  * data out.
 1676                  */
 1677                 retval |= DM_RET_COPY;
 1678 
 1679                 /*
 1680                  * If the return action has been set to descend, then we
 1681                  * know that we've already seen a non-bus matching
 1682                  * expression, therefore we need to further descend the tree.
 1683                  * This won't change by continuing around the loop, so we
 1684                  * go ahead and return.  If we haven't seen a non-bus
 1685                  * matching expression, we keep going around the loop until
 1686                  * we exhaust the matching expressions.  We'll set the stop
 1687                  * flag once we fall out of the loop.
 1688                  */
 1689                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1690                         return(retval);
 1691         }
 1692 
 1693         /*
 1694          * If the return action hasn't been set to descend yet, that means
 1695          * we haven't seen anything other than bus matching patterns.  So
 1696          * tell the caller to stop descending the tree -- the user doesn't
 1697          * want to match against lower level tree elements.
 1698          */
 1699         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1700                 retval |= DM_RET_STOP;
 1701 
 1702         return(retval);
 1703 }
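
      /*
       * Illustrative sketch of a pattern a caller might feed the routine
       * above; the names and values are examples only.  This one matches
       * the first unit of a sim named "ahc":
       *
       *      struct dev_match_pattern p;
       *
       *      p.type = DEV_MATCH_BUS;
       *      p.pattern.bus_pattern.flags = BUS_MATCH_NAME | BUS_MATCH_UNIT;
       *      strncpy(p.pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
       *      p.pattern.bus_pattern.unit_number = 0;
       *
       * For ahc0's bus, xptbusmatch(&p, 1, bus) returns
       * DM_RET_STOP | DM_RET_COPY; for any other bus it returns
       * DM_RET_STOP alone.
       */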
 1704 
 1705 static dev_match_ret
 1706 xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
 1707                struct cam_ed *device)
 1708 {
 1709         dev_match_ret retval;
 1710         int i;
 1711 
 1712         retval = DM_RET_NONE;
 1713 
 1714         /*
 1715          * If we aren't given something to match against, that's an error.
 1716          */
 1717         if (device == NULL)
 1718                 return(DM_RET_ERROR);
 1719 
 1720         /*
 1721          * If there are no match entries, then this device matches no
 1722          * matter what.
 1723          */
 1724         if ((patterns == NULL) || (num_patterns == 0))
 1725                 return(DM_RET_DESCEND | DM_RET_COPY);
 1726 
 1727         for (i = 0; i < num_patterns; i++) {
 1728                 struct device_match_pattern *cur_pattern;
 1729 
 1730                 /*
 1731                  * If the pattern in question isn't for a device node, we
 1732                  * aren't interested.
 1733                  */
 1734                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1735                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1736                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1737                                 retval |= DM_RET_DESCEND;
 1738                         continue;
 1739                 }
 1740 
 1741                 cur_pattern = &patterns[i].pattern.device_pattern;
 1742 
 1743                 /*
 1744                  * If they want to match any device node, we give them any
 1745                  * device node.
 1746                  */
 1747                 if (cur_pattern->flags == DEV_MATCH_ANY) {
 1748                         /* set the copy flag */
 1749                         retval |= DM_RET_COPY;
 1750 
 1752                         /*
 1753                          * If we've already decided on an action, go ahead
 1754                          * and return.
 1755                          */
 1756                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1757                                 return(retval);
 1758                 }
 1759 
 1760                 /*
 1761                  * Not sure why someone would do this...
 1762                  */
 1763                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1764                         continue;
 1765 
 1766                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1767                  && (cur_pattern->path_id != device->target->bus->path_id))
 1768                         continue;
 1769 
 1770                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1771                  && (cur_pattern->target_id != device->target->target_id))
 1772                         continue;
 1773 
 1774                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1775                  && (cur_pattern->target_lun != device->lun_id))
 1776                         continue;
 1777 
 1778                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1779                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1780                                     (caddr_t)&cur_pattern->inq_pat,
 1781                                     1, sizeof(cur_pattern->inq_pat),
 1782                                     scsi_static_inquiry_match) == NULL))
 1783                         continue;
 1784 
 1785                 /*
 1786                  * If we get to this point, the user definitely wants 
 1787                  * information on this device.  So tell the caller to copy
 1788                  * the data out.
 1789                  */
 1790                 retval |= DM_RET_COPY;
 1791 
 1792                 /*
 1793                  * If the return action has been set to descend, then we
 1794                  * know that we've already seen a peripheral matching
 1795                  * expression, therefore we need to further descend the tree.
 1796                  * This won't change by continuing around the loop, so we
 1797                  * go ahead and return.  If we haven't seen a peripheral
 1798                  * matching expression, we keep going around the loop until
 1799                  * we exhaust the matching expressions.  We'll set the stop
 1800                  * flag once we fall out of the loop.
 1801                  */
 1802                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1803                         return(retval);
 1804         }
 1805 
 1806         /*
 1807          * If the return action hasn't been set to descend yet, that means
 1808          * we haven't seen any peripheral matching patterns.  So tell the
 1809          * caller to stop descending the tree -- the user doesn't want to
 1810          * match against lower level tree elements.
 1811          */
 1812         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1813                 retval |= DM_RET_STOP;
 1814 
 1815         return(retval);
 1816 }
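
      /*
       * Illustrative sketch of an inquiry-based pattern for the routine
       * above, using the same shell-style wildcards as the quirk tables
       * (the vendor string is an example only):
       *
       *      struct dev_match_pattern p;
       *
       *      p.type = DEV_MATCH_DEVICE;
       *      p.pattern.device_pattern.flags = DEV_MATCH_INQUIRY;
       *      p.pattern.device_pattern.inq_pat.type = T_DIRECT;
       *      p.pattern.device_pattern.inq_pat.media_type = SIP_MEDIA_FIXED;
       *      strcpy(p.pattern.device_pattern.inq_pat.vendor, "SEAGATE");
       *      strcpy(p.pattern.device_pattern.inq_pat.product, "*");
       *      strcpy(p.pattern.device_pattern.inq_pat.revision, "*");
       *
       * cam_quirkmatch() then compares this against the device's cached
       * inquiry data via scsi_static_inquiry_match().
       */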
 1817 
 1818 /*
 1819  * Match a single peripheral against any number of match patterns.
 1820  */
 1821 static dev_match_ret
 1822 xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
 1823                struct cam_periph *periph)
 1824 {
 1825         dev_match_ret retval;
 1826         int i;
 1827 
 1828         /*
 1829          * If we aren't given something to match against, that's an error.
 1830          */
 1831         if (periph == NULL)
 1832                 return(DM_RET_ERROR);
 1833 
 1834         /*
 1835          * If there are no match entries, then this peripheral matches no
 1836          * matter what.
 1837          */
 1838         if ((patterns == NULL) || (num_patterns == 0))
 1839                 return(DM_RET_STOP | DM_RET_COPY);
 1840 
 1841         /*
 1842          * There aren't any nodes below a peripheral node, so there's no
 1843          * reason to descend the tree any further.
 1844          */
 1845         retval = DM_RET_STOP;
 1846 
 1847         for (i = 0; i < num_patterns; i++) {
 1848                 struct periph_match_pattern *cur_pattern;
 1849 
 1850                 /*
 1851                  * If the pattern in question isn't for a peripheral, we
 1852                  * aren't interested.
 1853                  */
 1854                 if (patterns[i].type != DEV_MATCH_PERIPH)
 1855                         continue;
 1856 
 1857                 cur_pattern = &patterns[i].pattern.periph_pattern;
 1858 
 1859                 /*
 1860                  * If they want to match on anything, then we will do so.
 1861                  */
 1862                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 1863                         /* set the copy flag */
 1864                         retval |= DM_RET_COPY;
 1865 
 1866                         /*
 1867                          * We've already set the return action to stop,
 1868                          * since there are no nodes below peripherals in
 1869                          * the tree.
 1870                          */
 1871                         return(retval);
 1872                 }
 1873 
 1874                 /*
 1875                  * Not sure why someone would do this...
 1876                  */
 1877                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 1878                         continue;
 1879 
 1880                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 1881                  && (cur_pattern->path_id != periph->path->bus->path_id))
 1882                         continue;
 1883 
 1884                 /*
 1885                  * For the target and lun id's, we have to make sure the
 1886                  * target and lun pointers aren't NULL.  The xpt peripheral
 1887                  * has a wildcard target and device.
 1888                  */
 1889                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 1890                  && ((periph->path->target == NULL)
 1891                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 1892                         continue;
 1893 
 1894                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 1895                  && ((periph->path->device == NULL)
 1896                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 1897                         continue;
 1898 
 1899                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 1900                  && (cur_pattern->unit_number != periph->unit_number))
 1901                         continue;
 1902 
 1903                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 1904                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 1905                              DEV_IDLEN) != 0))
 1906                         continue;
 1907 
 1908                 /*
 1909                  * If we get to this point, the user definitely wants 
 1910                  * information on this peripheral.  So tell the caller to
 1911                  * copy the data out.
 1912                  */
 1913                 retval |= DM_RET_COPY;
 1914 
 1915                 /*
 1916                  * The return action has already been set to stop, since
 1917                  * peripherals don't have any nodes below them in the EDT.
 1918                  */
 1919                 return(retval);
 1920         }
 1921 
 1922         /*
 1923          * If we get to this point, the peripheral that was passed in
 1924          * doesn't match any of the patterns.
 1925          */
 1926         return(retval);
 1927 }
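
      /*
       * Illustrative sketch, completing the set: a pattern for the
       * routine above that matches every "da" peripheral, whatever its
       * unit number:
       *
       *      struct dev_match_pattern p;
       *
       *      p.type = DEV_MATCH_PERIPH;
       *      p.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
       *      strncpy(p.pattern.periph_pattern.periph_name, "da",
       *              DEV_IDLEN);
       */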
 1928 
 1929 static int
 1930 xptedtbusfunc(struct cam_eb *bus, void *arg)
 1931 {
 1932         struct ccb_dev_match *cdm;
 1933         dev_match_ret retval;
 1934 
 1935         cdm = (struct ccb_dev_match *)arg;
 1936 
 1937         /*
 1938          * If our position is for something deeper in the tree, that means
 1939          * that we've already seen this node.  So, we keep going down.
 1940          */
 1941         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 1942          && (cdm->pos.cookie.bus == bus)
 1943          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 1944          && (cdm->pos.cookie.target != NULL))
 1945                 retval = DM_RET_DESCEND;
 1946         else
 1947                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 1948 
 1949         /*
 1950          * If we got an error, bail out of the search.
 1951          */
 1952         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 1953                 cdm->status = CAM_DEV_MATCH_ERROR;
 1954                 return(0);
 1955         }
 1956 
 1957         /*
 1958          * If the copy flag is set, copy this bus out.
 1959          */
 1960         if (retval & DM_RET_COPY) {
 1961                 int spaceleft, j;
 1962 
 1963                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 1964                         sizeof(struct dev_match_result));
 1965 
 1966                 /*
 1967                  * If we don't have enough space to put in another
 1968                  * match result, save our position and tell the
 1969                  * user there are more devices to check.
 1970                  */
 1971                 if (spaceleft < sizeof(struct dev_match_result)) {
 1972                         bzero(&cdm->pos, sizeof(cdm->pos));
 1973                         cdm->pos.position_type = 
 1974                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 1975 
 1976                         cdm->pos.cookie.bus = bus;
 1977                         cdm->pos.generations[CAM_BUS_GENERATION]=
 1978                                 bus_generation;
 1979                         cdm->status = CAM_DEV_MATCH_MORE;
 1980                         return(0);
 1981                 }
 1982                 j = cdm->num_matches;
 1983                 cdm->num_matches++;
 1984                 cdm->matches[j].type = DEV_MATCH_BUS;
 1985                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 1986                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 1987                 cdm->matches[j].result.bus_result.unit_number =
 1988                         bus->sim->unit_number;
 1989                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 1990                         bus->sim->sim_name, DEV_IDLEN);
 1991         }
 1992 
 1993         /*
 1994          * If the user is only interested in busses, there's no
 1995          * reason to descend to the next level in the tree.
 1996          */
 1997         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 1998                 return(1);
 1999 
 2000         /*
 2001          * If there is a target generation recorded, check it to
 2002          * make sure the target list hasn't changed.
 2003          */
 2004         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2005          && (bus == cdm->pos.cookie.bus)
 2006          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2007          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 2008          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 2009              bus->generation)) {
 2010                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2011                 return(0);
 2012         }
 2013 
 2014         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2015          && (cdm->pos.cookie.bus == bus)
 2016          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2017          && (cdm->pos.cookie.target != NULL))
 2018                 return(xpttargettraverse(bus,
 2019                                         (struct cam_et *)cdm->pos.cookie.target,
 2020                                          xptedttargetfunc, arg));
 2021         else
 2022                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 2023 }
 2024 
 2025 static int
 2026 xptedttargetfunc(struct cam_et *target, void *arg)
 2027 {
 2028         struct ccb_dev_match *cdm;
 2029 
 2030         cdm = (struct ccb_dev_match *)arg;
 2031 
 2032         /*
 2033          * If there is a device list generation recorded, check it to
 2034          * make sure the device list hasn't changed.
 2035          */
 2036         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2037          && (cdm->pos.cookie.bus == target->bus)
 2038          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2039          && (cdm->pos.cookie.target == target)
 2040          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2041          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 2042          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 2043              target->generation)) {
 2044                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2045                 return(0);
 2046         }
 2047 
 2048         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2049          && (cdm->pos.cookie.bus == target->bus)
 2050          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2051          && (cdm->pos.cookie.target == target)
 2052          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2053          && (cdm->pos.cookie.device != NULL))
 2054                 return(xptdevicetraverse(target,
 2055                                         (struct cam_ed *)cdm->pos.cookie.device,
 2056                                          xptedtdevicefunc, arg));
 2057         else
 2058                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 2059 }
 2060 
 2061 static int
 2062 xptedtdevicefunc(struct cam_ed *device, void *arg)
 2063 {
 2064 
 2065         struct ccb_dev_match *cdm;
 2066         dev_match_ret retval;
 2067 
 2068         cdm = (struct ccb_dev_match *)arg;
 2069 
 2070         /*
 2071          * If our position is for something deeper in the tree, that means
 2072          * that we've already seen this node.  So, we keep going down.
 2073          */
 2074         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2075          && (cdm->pos.cookie.device == device)
 2076          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2077          && (cdm->pos.cookie.periph != NULL))
 2078                 retval = DM_RET_DESCEND;
 2079         else
 2080                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 2081                                         device);
 2082 
 2083         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2084                 cdm->status = CAM_DEV_MATCH_ERROR;
 2085                 return(0);
 2086         }
 2087 
 2088         /*
 2089          * If the copy flag is set, copy this device out.
 2090          */
 2091         if (retval & DM_RET_COPY) {
 2092                 int spaceleft, j;
 2093 
 2094                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2095                         sizeof(struct dev_match_result));
 2096 
 2097                 /*
 2098                  * If we don't have enough space to put in another
 2099                  * match result, save our position and tell the
 2100                  * user there are more devices to check.
 2101                  */
 2102                 if (spaceleft < sizeof(struct dev_match_result)) {
 2103                         bzero(&cdm->pos, sizeof(cdm->pos));
 2104                         cdm->pos.position_type = 
 2105                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2106                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 2107 
 2108                         cdm->pos.cookie.bus = device->target->bus;
 2109                         cdm->pos.generations[CAM_BUS_GENERATION]=
 2110                                 bus_generation;
 2111                         cdm->pos.cookie.target = device->target;
 2112                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2113                                 device->target->bus->generation;
 2114                         cdm->pos.cookie.device = device;
 2115                         cdm->pos.generations[CAM_DEV_GENERATION] = 
 2116                                 device->target->generation;
 2117                         cdm->status = CAM_DEV_MATCH_MORE;
 2118                         return(0);
 2119                 }
 2120                 j = cdm->num_matches;
 2121                 cdm->num_matches++;
 2122                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 2123                 cdm->matches[j].result.device_result.path_id =
 2124                         device->target->bus->path_id;
 2125                 cdm->matches[j].result.device_result.target_id =
 2126                         device->target->target_id;
 2127                 cdm->matches[j].result.device_result.target_lun =
 2128                         device->lun_id;
 2129                 bcopy(&device->inq_data,
 2130                       &cdm->matches[j].result.device_result.inq_data,
 2131                       sizeof(struct scsi_inquiry_data));
 2132 
 2133                 /* Let the user know whether this device is unconfigured */
 2134                 if (device->flags & CAM_DEV_UNCONFIGURED)
 2135                         cdm->matches[j].result.device_result.flags =
 2136                                 DEV_RESULT_UNCONFIGURED;
 2137                 else
 2138                         cdm->matches[j].result.device_result.flags =
 2139                                 DEV_RESULT_NOFLAG;
 2140         }
 2141 
 2142         /*
 2143          * If the user isn't interested in peripherals, don't descend
 2144          * the tree any further.
 2145          */
 2146         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2147                 return(1);
 2148 
 2149         /*
 2150          * If there is a peripheral list generation recorded, make sure
 2151          * it hasn't changed.
 2152          */
 2153         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2154          && (device->target->bus == cdm->pos.cookie.bus)
 2155          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2156          && (device->target == cdm->pos.cookie.target)
 2157          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2158          && (device == cdm->pos.cookie.device)
 2159          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2160          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2161          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2162              device->generation)){
 2163                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2164                 return(0);
 2165         }
 2166 
 2167         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2168          && (cdm->pos.cookie.bus == device->target->bus)
 2169          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2170          && (cdm->pos.cookie.target == device->target)
 2171          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2172          && (cdm->pos.cookie.device == device)
 2173          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2174          && (cdm->pos.cookie.periph != NULL))
 2175                 return(xptperiphtraverse(device,
 2176                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2177                                 xptedtperiphfunc, arg));
 2178         else
 2179                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 2180 }
 2181 
 2182 static int
 2183 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 2184 {
 2185         struct ccb_dev_match *cdm;
 2186         dev_match_ret retval;
 2187 
 2188         cdm = (struct ccb_dev_match *)arg;
 2189 
 2190         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2191 
 2192         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2193                 cdm->status = CAM_DEV_MATCH_ERROR;
 2194                 return(0);
 2195         }
 2196 
 2197         /*
 2198          * If the copy flag is set, copy this peripheral out.
 2199          */
 2200         if (retval & DM_RET_COPY) {
 2201                 int spaceleft, j;
 2202 
 2203                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2204                         sizeof(struct dev_match_result));
 2205 
 2206                 /*
 2207                  * If we don't have enough space to put in another
 2208                  * match result, save our position and tell the
 2209                  * user there are more devices to check.
 2210                  */
 2211                 if (spaceleft < sizeof(struct dev_match_result)) {
 2212                         bzero(&cdm->pos, sizeof(cdm->pos));
 2213                         cdm->pos.position_type = 
 2214                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2215                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 2216                                 CAM_DEV_POS_PERIPH;
 2217 
 2218                         cdm->pos.cookie.bus = periph->path->bus;
 2219                         cdm->pos.generations[CAM_BUS_GENERATION]=
 2220                                 bus_generation;
 2221                         cdm->pos.cookie.target = periph->path->target;
 2222                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2223                                 periph->path->bus->generation;
 2224                         cdm->pos.cookie.device = periph->path->device;
 2225                         cdm->pos.generations[CAM_DEV_GENERATION] = 
 2226                                 periph->path->target->generation;
 2227                         cdm->pos.cookie.periph = periph;
 2228                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2229                                 periph->path->device->generation;
 2230                         cdm->status = CAM_DEV_MATCH_MORE;
 2231                         return(0);
 2232                 }
 2233 
 2234                 j = cdm->num_matches;
 2235                 cdm->num_matches++;
 2236                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2237                 cdm->matches[j].result.periph_result.path_id =
 2238                         periph->path->bus->path_id;
 2239                 cdm->matches[j].result.periph_result.target_id =
 2240                         periph->path->target->target_id;
 2241                 cdm->matches[j].result.periph_result.target_lun =
 2242                         periph->path->device->lun_id;
 2243                 cdm->matches[j].result.periph_result.unit_number =
 2244                         periph->unit_number;
 2245                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2246                         periph->periph_name, DEV_IDLEN);
 2247         }
 2248 
 2249         return(1);
 2250 }
 2251 
 2252 static int
 2253 xptedtmatch(struct ccb_dev_match *cdm)
 2254 {
 2255         int ret;
 2256 
 2257         cdm->num_matches = 0;
 2258 
 2259         /*
 2260          * Check the bus list generation.  If it has changed, the user
 2261          * needs to reset everything and start over.
 2262          */
 2263         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2264          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 2265          && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
 2266                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2267                 return(0);
 2268         }
 2269 
 2270         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2271          && (cdm->pos.cookie.bus != NULL))
 2272                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 2273                                      xptedtbusfunc, cdm);
 2274         else
 2275                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 2276 
 2277         /*
 2278          * If we get back 0, that means that we had to stop before fully
 2279          * traversing the EDT.  It also means that one of the subroutines
 2280          * has set the status field to the proper value.  If we get back 1,
 2281          * we've fully traversed the EDT and copied out any matching entries.
 2282          */
 2283         if (ret == 1)
 2284                 cdm->status = CAM_DEV_MATCH_LAST;
 2285 
 2286         return(ret);
 2287 }
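
      /*
       * Illustrative sketch of the resume protocol implemented above, as
       * a userland consumer might drive it (the buffer size is an example,
       * and fd is open on /dev/xpt0 as in the earlier sketch).  The caller
       * re-submits the same ccb until the traversal finishes; the saved
       * position cookies and generation counts let it pick up where it
       * left off:
       *
       *      ccb.ccb_h.func_code = XPT_DEV_MATCH;
       *      ccb.cdm.match_buf_len = 100 * sizeof(struct dev_match_result);
       *      ccb.cdm.matches = malloc(ccb.cdm.match_buf_len);
       *      ccb.cdm.num_patterns = 0;       (match everything)
       *      bzero(&ccb.cdm.pos, sizeof(ccb.cdm.pos));
       *
       *      do {
       *              if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
       *                      err(1, "CAMIOCOMMAND");
       *              ...consume ccb.cdm.num_matches entries...
       *      } while (ccb.ccb_h.status == CAM_REQ_CMP
       *            && ccb.cdm.status == CAM_DEV_MATCH_MORE);
       */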
 2288 
 2289 static int
 2290 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 2291 {
 2292         struct ccb_dev_match *cdm;
 2293 
 2294         cdm = (struct ccb_dev_match *)arg;
 2295 
 2296         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2297          && (cdm->pos.cookie.pdrv == pdrv)
 2298          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2299          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2300          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2301              (*pdrv)->generation)) {
 2302                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2303                 return(0);
 2304         }
 2305 
 2306         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2307          && (cdm->pos.cookie.pdrv == pdrv)
 2308          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2309          && (cdm->pos.cookie.periph != NULL))
 2310                 return(xptpdperiphtraverse(pdrv,
 2311                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2312                                 xptplistperiphfunc, arg));
 2313         else
 2314                 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
 2315 }
 2316 
 2317 static int
 2318 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 2319 {
 2320         struct ccb_dev_match *cdm;
 2321         dev_match_ret retval;
 2322 
 2323         cdm = (struct ccb_dev_match *)arg;
 2324 
 2325         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2326 
 2327         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2328                 cdm->status = CAM_DEV_MATCH_ERROR;
 2329                 return(0);
 2330         }
 2331 
 2332         /*
 2333          * If the copy flag is set, copy this peripheral out.
 2334          */
 2335         if (retval & DM_RET_COPY) {
 2336                 int spaceleft, j;
 2337 
 2338                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2339                         sizeof(struct dev_match_result));
 2340 
 2341                 /*
 2342                  * If we don't have enough space to put in another
 2343                  * match result, save our position and tell the
 2344                  * user there are more devices to check.
 2345                  */
 2346                 if (spaceleft < sizeof(struct dev_match_result)) {
 2347                         struct periph_driver **pdrv;
 2348 
 2349                         pdrv = NULL;
 2350                         bzero(&cdm->pos, sizeof(cdm->pos));
 2351                         cdm->pos.position_type = 
 2352                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 2353                                 CAM_DEV_POS_PERIPH;
 2354 
 2355                         /*
 2356                          * This may look a bit non-sensical, but it is
 2357                          * actually quite logical.  There are very few
 2358                          * peripheral drivers, and bloating every peripheral
 2359                          * structure with a pointer back to its parent
 2360                          * peripheral driver linker set entry would cost
 2361                          * more in the long run than doing this quick lookup.
 2362                          */
 2363                         for (pdrv =
 2364                              (struct periph_driver **)periphdriver_set.ls_items;
 2365                              *pdrv != NULL; pdrv++) {
 2366                                 if (strcmp((*pdrv)->driver_name,
 2367                                     periph->periph_name) == 0)
 2368                                         break;
 2369                         }
 2370 
 2371                         if (*pdrv == NULL) {
 2372                                 cdm->status = CAM_DEV_MATCH_ERROR;
 2373                                 return(0);
 2374                         }
 2375 
 2376                         cdm->pos.cookie.pdrv = pdrv;
 2377                         /*
 2378                          * The periph generation slot does double duty, as
 2379                          * does the periph pointer slot.  They are used for
 2380                          * both edt and pdrv lookups and positioning.
 2381                          */
 2382                         cdm->pos.cookie.periph = periph;
 2383                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2384                                 (*pdrv)->generation;
 2385                         cdm->status = CAM_DEV_MATCH_MORE;
 2386                         return(0);
 2387                 }
 2388 
 2389                 j = cdm->num_matches;
 2390                 cdm->num_matches++;
 2391                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2392                 cdm->matches[j].result.periph_result.path_id =
 2393                         periph->path->bus->path_id;
 2394 
 2395                 /*
 2396                  * The transport layer peripheral doesn't have a target or
 2397                  * lun.
 2398                  */
 2399                 if (periph->path->target)
 2400                         cdm->matches[j].result.periph_result.target_id =
 2401                                 periph->path->target->target_id;
 2402                 else
 2403                         cdm->matches[j].result.periph_result.target_id = -1;
 2404 
 2405                 if (periph->path->device)
 2406                         cdm->matches[j].result.periph_result.target_lun =
 2407                                 periph->path->device->lun_id;
 2408                 else
 2409                         cdm->matches[j].result.periph_result.target_lun = -1;
 2410 
 2411                 cdm->matches[j].result.periph_result.unit_number =
 2412                         periph->unit_number;
 2413                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2414                         periph->periph_name, DEV_IDLEN);
 2415         }
 2416 
 2417         return(1);
 2418 }
 2419 
 2420 static int
 2421 xptperiphlistmatch(struct ccb_dev_match *cdm)
 2422 {
 2423         int ret;
 2424 
 2425         cdm->num_matches = 0;
 2426 
 2427         /*
 2428          * At the corresponding point in the EDT traversal function, we
 2429          * check the bus list generation to make sure that no busses have
 2430          * been added or removed since the user last sent an XPT_DEV_MATCH
 2431          * ccb through.  For the peripheral driver list traversal function,
 2432          * however, we don't have to worry about new peripheral driver
 2433          * types coming or going; they're in a linker set, and therefore
 2434          * can't change without a recompile.
 2435          */
 2436 
 2437         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2438          && (cdm->pos.cookie.pdrv != NULL))
 2439                 ret = xptpdrvtraverse(
 2440                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2441                                 xptplistpdrvfunc, cdm);
 2442         else
 2443                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2444 
 2445         /*
 2446          * If we get back 0, that means that we had to stop before fully
 2447          * traversing the peripheral driver tree.  It also means that one of
 2448          * the subroutines has set the status field to the proper value.  If
 2449          * we get back 1, we've fully traversed the EDT and copied out any
 2450          * matching entries.
 2451          */
 2452         if (ret == 1)
 2453                 cdm->status = CAM_DEV_MATCH_LAST;
 2454 
 2455         return(ret);
 2456 }
 2457 
 2458 static int
 2459 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2460 {
 2461         struct cam_eb *bus, *next_bus;
 2462         int retval;
 2463 
 2464         retval = 1;
 2465 
 2466         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
 2467              bus != NULL;
 2468              bus = next_bus) {
 2469                 next_bus = TAILQ_NEXT(bus, links);
 2470 
 2471                 retval = tr_func(bus, arg);
 2472                 if (retval == 0)
 2473                         return(retval);
 2474         }
 2475 
 2476         return(retval);
 2477 }
 2478 
 2479 static int
 2480 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2481                   xpt_targetfunc_t *tr_func, void *arg)
 2482 {
 2483         struct cam_et *target, *next_target;
 2484         int retval;
 2485 
 2486         retval = 1;
 2487         for (target = (start_target ? start_target :
 2488                        TAILQ_FIRST(&bus->et_entries));
 2489              target != NULL; target = next_target) {
 2490 
 2491                 next_target = TAILQ_NEXT(target, links);
 2492 
 2493                 retval = tr_func(target, arg);
 2494 
 2495                 if (retval == 0)
 2496                         return(retval);
 2497         }
 2498 
 2499         return(retval);
 2500 }
 2501 
 2502 static int
 2503 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2504                   xpt_devicefunc_t *tr_func, void *arg)
 2505 {
 2506         struct cam_ed *device, *next_device;
 2507         int retval;
 2508 
 2509         retval = 1;
 2510         for (device = (start_device ? start_device :
 2511                        TAILQ_FIRST(&target->ed_entries));
 2512              device != NULL;
 2513              device = next_device) {
 2514 
 2515                 next_device = TAILQ_NEXT(device, links);
 2516 
 2517                 retval = tr_func(device, arg);
 2518 
 2519                 if (retval == 0)
 2520                         return(retval);
 2521         }
 2522 
 2523         return(retval);
 2524 }
 2525 
 2526 static int
 2527 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2528                   xpt_periphfunc_t *tr_func, void *arg)
 2529 {
 2530         struct cam_periph *periph, *next_periph;
 2531         int retval;
 2532 
 2533         retval = 1;
 2534 
 2535         for (periph = (start_periph ? start_periph :
 2536                        SLIST_FIRST(&device->periphs));
 2537              periph != NULL;
 2538              periph = next_periph) {
 2539 
 2540                 next_periph = SLIST_NEXT(periph, periph_links);
 2541 
 2542                 retval = tr_func(periph, arg);
 2543                 if (retval == 0)
 2544                         return(retval);
 2545         }
 2546 
 2547         return(retval);
 2548 }
 2549 
 2550 static int
 2551 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2552                 xpt_pdrvfunc_t *tr_func, void *arg)
 2553 {
 2554         struct periph_driver **pdrv;
 2555         int retval;
 2556 
 2557         retval = 1;
 2558 
 2559         /*
 2560          * We don't traverse the peripheral driver list like we do the
 2561          * other lists, because it is a linker set, and therefore cannot be
 2562          * changed during runtime.  If the peripheral driver list is ever
 2563          * redone as something other than a linker set (i.e., something
 2564          * that can change while the system is running), the list traversal
 2565          * should be modified to work like the other traversal functions.
 2566          */
 2567         for (pdrv = (start_pdrv ? start_pdrv :
 2568              (struct periph_driver **)periphdriver_set.ls_items);
 2569              *pdrv != NULL; pdrv++) {
 2570                 retval = tr_func(pdrv, arg);
 2571 
 2572                 if (retval == 0)
 2573                         return(retval);
 2574         }
 2575 
 2576         return(retval);
 2577 }
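
/*
 * Editorial note (hedged example): in this era a peripheral driver adds
 * itself to periphdriver_set at link time with DATA_SET, along the
 * lines of what existing drivers such as scsi_da.c do:
 *
 *	static struct periph_driver xxdriver =
 *	{
 *		xxinit, "xx",
 *		TAILQ_HEAD_INITIALIZER(xxdriver.units), 0
 *	};
 *	DATA_SET(periphdriver_set, xxdriver);
 */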
 2578 
 2579 static int
 2580 xptpdperiphtraverse(struct periph_driver **pdrv,
 2581                     struct cam_periph *start_periph,
 2582                     xpt_periphfunc_t *tr_func, void *arg)
 2583 {
 2584         struct cam_periph *periph, *next_periph;
 2585         int retval;
 2586 
 2587         retval = 1;
 2588 
 2589         for (periph = (start_periph ? start_periph :
 2590              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2591              periph = next_periph) {
 2592 
 2593                 next_periph = TAILQ_NEXT(periph, unit_links);
 2594 
 2595                 retval = tr_func(periph, arg);
 2596                 if (retval == 0)
 2597                         return(retval);
 2598         }
 2599         return(retval);
 2600 }
 2601 
 2602 static int
 2603 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2604 {
 2605         struct xpt_traverse_config *tr_config;
 2606 
 2607         tr_config = (struct xpt_traverse_config *)arg;
 2608 
 2609         if (tr_config->depth == XPT_DEPTH_BUS) {
 2610                 xpt_busfunc_t *tr_func;
 2611 
 2612                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2613 
 2614                 return(tr_func(bus, tr_config->tr_arg));
 2615         } else
 2616                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2617 }
 2618 
 2619 static int
 2620 xptdeftargetfunc(struct cam_et *target, void *arg)
 2621 {
 2622         struct xpt_traverse_config *tr_config;
 2623 
 2624         tr_config = (struct xpt_traverse_config *)arg;
 2625 
 2626         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2627                 xpt_targetfunc_t *tr_func;
 2628 
 2629                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2630 
 2631                 return(tr_func(target, tr_config->tr_arg));
 2632         } else
 2633                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2634 }
 2635 
 2636 static int
 2637 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2638 {
 2639         struct xpt_traverse_config *tr_config;
 2640 
 2641         tr_config = (struct xpt_traverse_config *)arg;
 2642 
 2643         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2644                 xpt_devicefunc_t *tr_func;
 2645 
 2646                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2647 
 2648                 return(tr_func(device, tr_config->tr_arg));
 2649         } else
 2650                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2651 }
 2652 
 2653 static int
 2654 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2655 {
 2656         struct xpt_traverse_config *tr_config;
 2657         xpt_periphfunc_t *tr_func;
 2658 
 2659         tr_config = (struct xpt_traverse_config *)arg;
 2660 
 2661         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2662 
 2663         /*
 2664          * Unlike the other default functions, we don't check for depth
 2665          * here.  The peripheral driver level is the last level in the EDT,
 2666          * so if we're here, we should execute the function in question.
 2667          */
 2668         return(tr_func(periph, tr_config->tr_arg));
 2669 }
 2670 
 2671 /*
 2672  * Execute the given function for every bus in the EDT.
 2673  */
 2674 static int
 2675 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2676 {
 2677         struct xpt_traverse_config tr_config;
 2678 
 2679         tr_config.depth = XPT_DEPTH_BUS;
 2680         tr_config.tr_func = tr_func;
 2681         tr_config.tr_arg = arg;
 2682 
 2683         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2684 }
 2685 
 2686 #ifdef notusedyet
 2687 /*
 2688  * Execute the given function for every target in the EDT.
 2689  */
 2690 static int
 2691 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
 2692 {
 2693         struct xpt_traverse_config tr_config;
 2694 
 2695         tr_config.depth = XPT_DEPTH_TARGET;
 2696         tr_config.tr_func = tr_func;
 2697         tr_config.tr_arg = arg;
 2698 
 2699         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2700 }
 2701 #endif /* notusedyet */
 2702 
 2703 /*
 2704  * Execute the given function for every device in the EDT.
 2705  */
 2706 static int
 2707 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2708 {
 2709         struct xpt_traverse_config tr_config;
 2710 
 2711         tr_config.depth = XPT_DEPTH_DEVICE;
 2712         tr_config.tr_func = tr_func;
 2713         tr_config.tr_arg = arg;
 2714 
 2715         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2716 }
 2717 
 2718 #ifdef notusedyet
 2719 /*
 2720  * Execute the given function for every peripheral in the EDT.
 2721  */
 2722 static int
 2723 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
 2724 {
 2725         struct xpt_traverse_config tr_config;
 2726 
 2727         tr_config.depth = XPT_DEPTH_PERIPH;
 2728         tr_config.tr_func = tr_func;
 2729         tr_config.tr_arg = arg;
 2730 
 2731         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2732 }
 2733 #endif /* notusedyet */
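
/*
 * Editorial sketch (hypothetical, not in the original source): a consumer
 * of xpt_for_all_devices() supplies an xpt_devicefunc_t and a context
 * pointer.  Counting the configured devices in the EDT might look like:
 */
#ifdef notusedyet
static int
xptcountdevfunc(struct cam_ed *device, void *arg)
{
	int *count;

	count = (int *)arg;
	if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
		(*count)++;
	return (1);	/* keep going; 0 would abort the traversal */
}

static int
xpt_count_configured_devices(void)
{
	int count;

	count = 0;
	xpt_for_all_devices(xptcountdevfunc, &count);
	return (count);
}
#endif /* notusedyet */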
 2734 
 2735 static int
 2736 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2737 {
 2738         struct cam_path path;
 2739         struct ccb_getdev cgd;
 2740         struct async_node *cur_entry;
 2741 
 2742         cur_entry = (struct async_node *)arg;
 2743 
 2744         /*
 2745          * Don't report unconfigured devices (Wildcard devs,
 2746          * devices only for target mode, device instances
 2747          * that have been invalidated but are waiting for
 2748          * their last reference to be released).
 2749          */
 2750         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2751                 return (1);
 2752 
 2753         xpt_compile_path(&path,
 2754                          NULL,
 2755                          device->target->bus->path_id,
 2756                          device->target->target_id,
 2757                          device->lun_id);
 2758         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
 2759         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2760         xpt_action((union ccb *)&cgd);
 2761         cur_entry->callback(cur_entry->callback_arg,
 2762                             AC_FOUND_DEVICE,
 2763                             &path, &cgd);
 2764         xpt_release_path(&path);
 2765 
 2766         return(1);
 2767 }
 2768 
 2769 static int
 2770 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2771 {
 2772         struct cam_path path;
 2773         struct ccb_pathinq cpi;
 2774         struct async_node *cur_entry;
 2775 
 2776         cur_entry = (struct async_node *)arg;
 2777 
 2778         xpt_compile_path(&path, /*periph*/NULL,
 2779                          bus->sim->path_id,
 2780                          CAM_TARGET_WILDCARD,
 2781                          CAM_LUN_WILDCARD);
 2782         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 2783         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2784         xpt_action((union ccb *)&cpi);
 2785         cur_entry->callback(cur_entry->callback_arg,
 2786                             AC_PATH_REGISTERED,
 2787                             &path, &cpi);
 2788         xpt_release_path(&path);
 2789 
 2790         return(1);
 2791 }
 2792 
 2793 void
 2794 xpt_action(union ccb *start_ccb)
 2795 {
 2796         int iopl;
 2797 
 2798         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2799 
 2800         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2801 
 2802         iopl = splsoftcam();
 2803         switch (start_ccb->ccb_h.func_code) {
 2804         case XPT_SCSI_IO:
 2805         {
 2806 #ifdef CAMDEBUG
 2807                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 2808                 struct cam_path *path;
 2809 
 2810                 path = start_ccb->ccb_h.path;
 2811 #endif
 2812 
 2813                 /*
 2814                  * For the sake of compatibility with SCSI-1
 2815                  * devices that may not understand the identify
 2816                  * message, we include lun information in the
 2817                  * second byte of all commands.  SCSI-1 defines the lun
 2818                  * as a 3 bit value and reserves only 3 bits for lun
 2819                  * information in the CDB.  Later
 2820                  * revisions of the SCSI spec allow for more than 8
 2821                  * luns, but have deprecated lun information in the
 2822                  * CDB.  So, if the lun won't fit, we must omit it.
 2823                  *
 2824                  * Also be aware that during initial probing for devices,
 2825                  * the inquiry information is unknown but initialized to 0.
 2826                  * This means that this code will be exercised even while
 2827                  * probing devices whose true ANSI revision is greater than 2.
 2828                  */
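                /*
                 * Worked example (editorial note): for target_lun 3 on an
                 * ANSI <= 2 device, the OR below folds (3 << 5) == 0x60
                 * into the second CDB byte, i.e. the lun occupies bits 5-7.
                 */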
 2829                 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
 2830                  && start_ccb->ccb_h.target_lun < 8
 2831                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 2832 
 2833                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 2834                             start_ccb->ccb_h.target_lun << 5;
 2835                 }
 2836                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 2837                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
 2838                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
 2839                                        &path->device->inq_data),
 2840                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
 2841                                           cdb_str, sizeof(cdb_str))));
 2842                 /* FALLTHROUGH */
 2843         }
 2844         case XPT_TARGET_IO:
 2845         case XPT_CONT_TARGET_IO:
 2846                 start_ccb->csio.sense_resid = 0;
 2847                 start_ccb->csio.resid = 0;
 2848                 /* FALLTHROUGH */
 2849         case XPT_RESET_DEV:
 2850         case XPT_ENG_EXEC:
 2851         {
 2852                 struct cam_path *path;
 2853                 int s;
 2854                 int runq;
 2855 
 2856                 path = start_ccb->ccb_h.path;
 2857                 s = splsoftcam();
 2858 
 2859                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 2860                 if (path->device->qfrozen_cnt == 0)
 2861                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
 2862                 else
 2863                         runq = 0;
 2864                 splx(s);
 2865                 if (runq != 0)
 2866                         xpt_run_dev_sendq(path->bus);
 2867                 break;
 2868         }
 2869         case XPT_SET_TRAN_SETTINGS:
 2870         {
 2871                 xpt_set_transfer_settings(&start_ccb->cts,
 2872                                           start_ccb->ccb_h.path->device,
 2873                                           /*async_update*/FALSE);
 2874                 break;
 2875         }
 2876         case XPT_CALC_GEOMETRY:
 2877         {
 2878                 struct cam_sim *sim;
 2879 
 2880                 /* Filter out garbage */
 2881                 if (start_ccb->ccg.block_size == 0
 2882                  || start_ccb->ccg.volume_size == 0) {
 2883                         start_ccb->ccg.cylinders = 0;
 2884                         start_ccb->ccg.heads = 0;
 2885                         start_ccb->ccg.secs_per_track = 0;
 2886                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2887                         break;
 2888                 }
 2889 #ifdef PC98
 2890                 /*
 2891                  * In a PC-98 system, geometry translation depends on
 2892                  * the "real" device geometry obtained from mode page 4.
 2893                  * SCSI geometry translation is performed in the
 2894                  * initialization routine of the SCSI BIOS and the result
 2895                  * is stored in host memory.  If the translation is available
 2896                  * in host memory, use it.  If not, rely on the default
 2897                  * translation the device driver performs.
 2898                  */
 2899                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 2900                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 2901                         break;
 2902                 }
 2903 #endif
 2904                 sim = start_ccb->ccb_h.path->bus->sim;
 2905                 (*(sim->sim_action))(sim, start_ccb);
 2906                 break;
 2907         }
 2908         case XPT_ABORT:
 2909         {
 2910                 union ccb* abort_ccb;
 2911                 int s;                          
 2912 
 2913                 abort_ccb = start_ccb->cab.abort_ccb;
 2914                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 2915 
 2916                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 2917                                 struct cam_ccbq *ccbq;
 2918 
 2919                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
 2920                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
 2921                                 abort_ccb->ccb_h.status =
 2922                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2923                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2924                                 s = splcam();
 2925                                 xpt_done(abort_ccb);
 2926                                 splx(s);
 2927                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2928                                 break;
 2929                         }
 2930                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 2931                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 2932                                 /*
 2933                                  * We've caught this ccb en route to
 2934                                  * the SIM.  Flag it for abort and the
 2935                                  * SIM will do so just before starting
 2936                                  * real work on the CCB.
 2937                                  */
 2938                                 abort_ccb->ccb_h.status =
 2939                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 2940                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 2941                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2942                                 break;
 2943                         }
 2944                 } 
 2945                 if (XPT_FC_IS_QUEUED(abort_ccb)
 2946                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 2947                         /*
 2948                          * It's already completed but waiting
 2949                          * for our SWI to get to it.
 2950                          */
 2951                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 2952                         break;
 2953                 }
 2954                 /*
 2955                  * If we weren't able to take care of the abort request
 2956                  * in the XPT, pass the request down to the SIM for processing.
 2957                  */
 2958                 /* FALLTHROUGH */
 2959         }
 2960         case XPT_ACCEPT_TARGET_IO:
 2961         case XPT_EN_LUN:
 2962         case XPT_IMMED_NOTIFY:
 2963         case XPT_NOTIFY_ACK:
 2964         case XPT_GET_TRAN_SETTINGS:
 2965         case XPT_RESET_BUS:
 2966         {
 2967                 struct cam_sim *sim;
 2968 
 2969                 sim = start_ccb->ccb_h.path->bus->sim;
 2970                 (*(sim->sim_action))(sim, start_ccb);
 2971                 break;
 2972         }
 2973         case XPT_PATH_INQ:
 2974         {
 2975                 struct cam_sim *sim;
 2976 
 2977                 sim = start_ccb->ccb_h.path->bus->sim;
 2978                 (*(sim->sim_action))(sim, start_ccb);
 2979                 break;
 2980         }
 2981         case XPT_PATH_STATS:
 2982                 start_ccb->cpis.last_reset =
 2983                         start_ccb->ccb_h.path->bus->last_reset;
 2984                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 2985                 break;
 2986         case XPT_GDEV_TYPE:
 2987         {
 2988                 struct cam_ed *dev;
 2989                 int s;
 2990 
 2991                 dev = start_ccb->ccb_h.path->device;
 2992                 s = splcam();
 2993                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 2994                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 2995                 } else {
 2996                         struct ccb_getdev *cgd;
 2997                         struct cam_eb *bus;
 2998                         struct cam_et *tar;
 2999 
 3000                         cgd = &start_ccb->cgd;
 3001                         bus = cgd->ccb_h.path->bus;
 3002                         tar = cgd->ccb_h.path->target;
 3003                         cgd->inq_data = dev->inq_data;
 3004                         cgd->ccb_h.status = CAM_REQ_CMP;
 3005                         cgd->serial_num_len = dev->serial_num_len;
 3006                         if ((dev->serial_num_len > 0)
 3007                          && (dev->serial_num != NULL))
 3008                                 bcopy(dev->serial_num, cgd->serial_num,
 3009                                       dev->serial_num_len);
 3010                 }
 3011                 splx(s);
 3012                 break; 
 3013         }
 3014         case XPT_GDEV_STATS:
 3015         {
 3016                 struct cam_ed *dev;
 3017                 int s;
 3018 
 3019                 dev = start_ccb->ccb_h.path->device;
 3020                 s = splcam();
 3021                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3022                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3023                 } else {
 3024                         struct ccb_getdevstats *cgds;
 3025                         struct cam_eb *bus;
 3026                         struct cam_et *tar;
 3027 
 3028                         cgds = &start_ccb->cgds;
 3029                         bus = cgds->ccb_h.path->bus;
 3030                         tar = cgds->ccb_h.path->target;
 3031                         cgds->dev_openings = dev->ccbq.dev_openings;
 3032                         cgds->dev_active = dev->ccbq.dev_active;
 3033                         cgds->devq_openings = dev->ccbq.devq_openings;
 3034                         cgds->devq_queued = dev->ccbq.queue.entries;
 3035                         cgds->held = dev->ccbq.held;
 3036                         cgds->last_reset = tar->last_reset;
 3037                         cgds->maxtags = dev->quirk->maxtags;
 3038                         cgds->mintags = dev->quirk->mintags;
 3039                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 3040                                 cgds->last_reset = bus->last_reset;
 3041                         cgds->ccb_h.status = CAM_REQ_CMP;
 3042                 }
 3043                 splx(s);
 3044                 break;
 3045         }
 3046         case XPT_GDEVLIST:
 3047         {
 3048                 struct cam_periph       *nperiph;
 3049                 struct periph_list      *periph_head;
 3050                 struct ccb_getdevlist   *cgdl;
 3051                 int                     i;
 3052                 int                     s;
 3053                 struct cam_ed           *device;
 3054                 int                     found;
 3055 
 3056 
 3057                 found = 0;
 3058 
 3059                 /*
 3060                  * Don't want anyone mucking with our data.
 3061                  */
 3062                 s = splcam();
 3063                 device = start_ccb->ccb_h.path->device;
 3064                 periph_head = &device->periphs;
 3065                 cgdl = &start_ccb->cgdl;
 3066 
 3067                 /*
 3068                  * Check and see if the list has changed since the user
 3069                  * last requested a list member.  If so, tell them that the
 3070                  * list has changed, and therefore they need to start over 
 3071                  * from the beginning.
 3072                  */
 3073                 if ((cgdl->index != 0) && 
 3074                     (cgdl->generation != device->generation)) {
 3075                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 3076                         splx(s);
 3077                         break;
 3078                 }
 3079 
 3080                 /*
 3081                  * Traverse the list of peripherals and attempt to find 
 3082                  * the requested peripheral.
 3083                  */
 3084                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 3085                      (nperiph != NULL) && (i <= cgdl->index);
 3086                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 3087                         if (i == cgdl->index) {
 3088                                 strncpy(cgdl->periph_name,
 3089                                         nperiph->periph_name,
 3090                                         DEV_IDLEN);
 3091                                 cgdl->unit_number = nperiph->unit_number;
 3092                                 found = 1;
 3093                         }
 3094                 }
 3095                 if (found == 0) {
 3096                         cgdl->status = CAM_GDEVLIST_ERROR;
 3097                         splx(s);
 3098                         break;
 3099                 }
 3100 
 3101                 if (nperiph == NULL)
 3102                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 3103                 else
 3104                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 3105 
 3106                 cgdl->index++;
 3107                 cgdl->generation = device->generation;
 3108 
 3109                 splx(s);
 3110                 cgdl->ccb_h.status = CAM_REQ_CMP;
 3111                 break;
 3112         }
 3113         case XPT_DEV_MATCH:
 3114         {
 3115                 int s;
 3116                 dev_pos_type position_type;
 3117                 struct ccb_dev_match *cdm;
 3118                 int ret;
 3119 
 3120                 cdm = &start_ccb->cdm;
 3121 
 3122                 /*
 3123                  * Prevent EDT changes while we traverse it.
 3124                  */
 3125                 s = splcam();
 3126                 /*
 3127                  * There are two ways of getting at information in the EDT.
 3128                  * The first way is via the primary EDT tree.  It starts
 3129                  * with a list of busses, then a list of targets on a bus,
 3130                  * then devices/luns on a target, and then peripherals on a
 3131                  * device/lun.  The "other" way is via the peripheral
 3132                  * driver lists, which are (obviously) organized by
 3133                  * peripheral driver.  So it makes sense to
 3134                  * use the peripheral driver list if the user is looking
 3135                  * for something like "da1", or all "da" devices.  If the
 3136                  * user is looking for something on a particular bus/target
 3137                  * or lun, it's generally better to go through the EDT tree.
 3138                  */
 3139 
 3140                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 3141                         position_type = cdm->pos.position_type;
 3142                 else {
 3143                         int i;
 3144 
 3145                         position_type = CAM_DEV_POS_NONE;
 3146 
 3147                         for (i = 0; i < cdm->num_patterns; i++) {
 3148                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 3149                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
 3150                                         position_type = CAM_DEV_POS_EDT;
 3151                                         break;
 3152                                 }
 3153                         }
 3154 
 3155                         if (cdm->num_patterns == 0)
 3156                                 position_type = CAM_DEV_POS_EDT;
 3157                         else if (position_type == CAM_DEV_POS_NONE)
 3158                                 position_type = CAM_DEV_POS_PDRV;
 3159                 }
 3160 
 3161                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
 3162                 case CAM_DEV_POS_EDT:
 3163                         ret = xptedtmatch(cdm);
 3164                         break;
 3165                 case CAM_DEV_POS_PDRV:
 3166                         ret = xptperiphlistmatch(cdm);
 3167                         break;
 3168                 default:
 3169                         cdm->status = CAM_DEV_MATCH_ERROR;
 3170                         break;
 3171                 }
 3172 
 3173                 splx(s);
 3174 
 3175                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 3176                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 3177                 else
 3178                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3179 
 3180                 break;
 3181         }
 3182         case XPT_SASYNC_CB:
 3183         {
 3184                 struct ccb_setasync *csa;
 3185                 struct async_node *cur_entry;
 3186                 struct async_list *async_head;
 3187                 u_int32_t added;
 3188                 int s;
 3189 
 3190                 csa = &start_ccb->csa;
 3191                 added = csa->event_enable;
 3192                 async_head = &csa->ccb_h.path->device->asyncs;
 3193 
 3194                 /*
 3195                  * If there is already an entry for us, simply
 3196                  * update it.
 3197                  */
 3198                 s = splcam();
 3199                 cur_entry = SLIST_FIRST(async_head);
 3200                 while (cur_entry != NULL) {
 3201                         if ((cur_entry->callback_arg == csa->callback_arg)
 3202                          && (cur_entry->callback == csa->callback))
 3203                                 break;
 3204                         cur_entry = SLIST_NEXT(cur_entry, links);
 3205                 }
 3206 
 3207                 if (cur_entry != NULL) {
 3208                         /*
 3209                          * If the request has no flags set,
 3210                          * remove the entry.
 3211                          */
 3212                         added &= ~cur_entry->event_enable;
 3213                         if (csa->event_enable == 0) {
 3214                                 SLIST_REMOVE(async_head, cur_entry,
 3215                                              async_node, links);
 3216                                 csa->ccb_h.path->device->refcount--;
 3217                                 free(cur_entry, M_DEVBUF);
 3218                         } else {
 3219                                 cur_entry->event_enable = csa->event_enable;
 3220                         }
 3221                 } else {
 3222                         cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
 3223                                            M_NOWAIT);
 3224                         if (cur_entry == NULL) {
 3225                                 splx(s);
 3226                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 3227                                 break;
 3228                         }
 3229                         cur_entry->event_enable = csa->event_enable;
 3230                         cur_entry->callback_arg = csa->callback_arg;
 3231                         cur_entry->callback = csa->callback;
 3232                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 3233                         csa->ccb_h.path->device->refcount++;
 3234                 }
 3235 
 3236                 if ((added & AC_FOUND_DEVICE) != 0) {
 3237                         /*
 3238                          * Get this peripheral up to date with all
 3239                          * the currently existing devices.
 3240                          */
 3241                         xpt_for_all_devices(xptsetasyncfunc, cur_entry);
 3242                 }
 3243                 if ((added & AC_PATH_REGISTERED) != 0) {
 3244                         /*
 3245                          * Get this peripheral up to date with all
 3246                          * the currently existing busses.
 3247                          */
 3248                         xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
 3249                 }
 3250                 splx(s);
 3251                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3252                 break;
 3253         }
 3254         case XPT_REL_SIMQ:
 3255         {
 3256                 struct ccb_relsim *crs;
 3257                 struct cam_ed *dev;
 3258                 int s;
 3259 
 3260                 crs = &start_ccb->crs;
 3261                 dev = crs->ccb_h.path->device;
 3262                 if (dev == NULL) {
 3263 
 3264                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 3265                         break;
 3266                 }
 3267 
 3268                 s = splcam();
 3269 
 3270                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 3271 
 3272                         if ((dev->inq_data.flags & SID_CmdQue) != 0) {
 3273 
 3274                                 /* Don't ever go below one opening */
 3275                                 if (crs->openings > 0) {
 3276                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
 3277                                                             crs->openings);
 3278 
 3279                                         if (bootverbose) {
 3280                                                 xpt_print_path(crs->ccb_h.path);
 3281                                                 printf("tagged openings "
 3282                                                        "now %d\n",
 3283                                                        crs->openings);
 3284                                         }
 3285                                 }
 3286                         }
 3287                 }
 3288 
 3289                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 3290 
 3291                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 3292 
 3293                                 /*
 3294                                  * Just extend the old timeout and decrement
 3295                                  * the freeze count so that a single timeout
 3296                                  * is sufficient for releasing the queue.
 3297                                  */
 3298                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3299                                 untimeout(xpt_release_devq_timeout,
 3300                                           dev, dev->c_handle);
 3301                         } else {
 3302 
 3303                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3304                         }
 3305 
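                        /*
                         * Editorial note: release_timeout appears to be in
                         * milliseconds; the expression below converts it to
                         * clock ticks via hz.
                         */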
 3306                         dev->c_handle =
 3307                                 timeout(xpt_release_devq_timeout,
 3308                                         dev,
 3309                                         (crs->release_timeout * hz) / 1000);
 3310 
 3311                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 3312 
 3313                 }
 3314 
 3315                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 3316 
 3317                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 3318                                 /*
 3319                                  * Decrement the freeze count so that a single
 3320                                  * completion is still sufficient to unfreeze
 3321                                  * the queue.
 3322                                  */
 3323                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3324                         } else {
 3325                                 
 3326                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 3327                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3328                         }
 3329                 }
 3330 
 3331                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 3332 
 3333                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 3334                          || (dev->ccbq.dev_active == 0)) {
 3335 
 3336                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3337                         } else {
 3338                                 
 3339                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 3340                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3341                         }
 3342                 }
 3343                 splx(s);
 3344                 
 3345                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 3346 
 3347                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
 3348                                          /*run_queue*/TRUE);
 3349                 }
 3350                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
 3351                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3352                 break;
 3353         }
 3354         case XPT_SCAN_BUS:
 3355                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
 3356                 break;
 3357         case XPT_SCAN_LUN:
 3358                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
 3359                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
 3360                              start_ccb);
 3361                 break;
 3362         case XPT_DEBUG: {
 3363 #ifdef CAMDEBUG
 3364                 int s;
 3365                 
 3366                 s = splcam();
 3367 #ifdef CAM_DEBUG_DELAY
 3368                 cam_debug_delay = CAM_DEBUG_DELAY;
 3369 #endif
 3370                 cam_dflags = start_ccb->cdbg.flags;
 3371                 if (cam_dpath != NULL) {
 3372                         xpt_free_path(cam_dpath);
 3373                         cam_dpath = NULL;
 3374                 }
 3375 
 3376                 if (cam_dflags != CAM_DEBUG_NONE) {
 3377                         if (xpt_create_path(&cam_dpath, xpt_periph,
 3378                                             start_ccb->ccb_h.path_id,
 3379                                             start_ccb->ccb_h.target_id,
 3380                                             start_ccb->ccb_h.target_lun) !=
 3381                                             CAM_REQ_CMP) {
 3382                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3383                                 cam_dflags = CAM_DEBUG_NONE;
 3384                         } else {
 3385                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3386                                 xpt_print_path(cam_dpath);
 3387                                 printf("debugging flags now %x\n", cam_dflags);
 3388                         }
 3389                 } else {
 3390                         cam_dpath = NULL;
 3391                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3392                 }
 3393                 splx(s);
 3394 #else /* !CAMDEBUG */
 3395                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 3396 #endif /* CAMDEBUG */
 3397                 break;
 3398         }
 3399         case XPT_NOOP:
 3400                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 3401                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
 3402                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3403                 break;
 3404         default:
 3405         case XPT_SDEV_TYPE:
 3406         case XPT_TERM_IO:
 3407         case XPT_ENG_INQ:
 3408                 /* XXX Implement */
 3409                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 3410                 break;
 3411         }
 3412         splx(iopl);
 3413 }
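
/*
 * Editorial sketch (hypothetical, not in the original source): a periph
 * driver registers for async events by filling out a ccb_setasync and
 * handing it to xpt_action() with XPT_SASYNC_CB.  The (callback,
 * callback_arg) pair identifies the registration; re-issuing it with
 * event_enable == 0 removes the entry, as the code above shows.
 */
#ifdef notusedyet
static void
xpt_example_async_cb(void *callback_arg, u_int32_t code,
		     struct cam_path *path, void *arg)
{
	if (code == AC_FOUND_DEVICE)
		printf("example: async device arrival\n");
}

static void
xpt_example_register_async(struct cam_path *path)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = xpt_example_async_cb;
	csa.callback_arg = NULL;
	xpt_action((union ccb *)&csa);
}
#endif /* notusedyet */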
 3414 
 3415 void
 3416 xpt_polled_action(union ccb *start_ccb)
 3417 {
 3418         int       s;
 3419         u_int32_t timeout;
 3420         struct    cam_sim *sim; 
 3421         struct    cam_devq *devq;
 3422         struct    cam_ed *dev;
 3423 
 3424         timeout = start_ccb->ccb_h.timeout;
 3425         sim = start_ccb->ccb_h.path->bus->sim;
 3426         devq = sim->devq;
 3427         dev = start_ccb->ccb_h.path->device;
 3428 
 3429         s = splcam();
 3430 
 3431         /*
 3432          * Steal an opening so that no other queued requests
 3433          * can get it before us while we simulate interrupts.
 3434          */
 3435         dev->ccbq.devq_openings--;
 3436         dev->ccbq.dev_openings--;       
 3437         
 3438         while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
 3439            && (--timeout > 0)) {
 3440                 DELAY(1000);
 3441                 (*(sim->sim_poll))(sim);
 3442                 swi_camnet();
 3443                 swi_cambio();           
 3444         }
 3445         
 3446         dev->ccbq.devq_openings++;
 3447         dev->ccbq.dev_openings++;
 3448         
 3449         if (timeout != 0) {
 3450                 xpt_action(start_ccb);
 3451                 while(--timeout > 0) {
 3452                         (*(sim->sim_poll))(sim);
 3453                         swi_camnet();
 3454                         swi_cambio();
 3455                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
 3456                             != CAM_REQ_INPROG)
 3457                                 break;
 3458                         DELAY(1000);
 3459                 }
 3460                 if (timeout == 0) {
 3461                         /*
 3462                          * XXX Is it worth adding a sim_timeout entry
 3463                          * point so we can attempt recovery?  If
 3464                          * this is only used for dumps, I don't think
 3465                          * it is.
 3466                          */
 3467                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3468                 }
 3469         } else {
 3470                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3471         }
 3472         splx(s);
 3473 }
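
/*
 * Editorial sketch (hypothetical, not in the original source): polled
 * actions are intended for contexts where interrupts cannot be relied
 * upon, such as crash dumps.  Issuing a polled TEST UNIT READY through
 * a peripheral's path might look like:
 */
#ifdef notusedyet
static cam_status
xpt_example_polled_tur(struct cam_periph *periph)
{
	struct ccb_scsiio csio;

	xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
	scsi_test_unit_ready(&csio, /*retries*/0, /*cbfcnp*/NULL,
			     MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE,
			     /*timeout*/5000);
	xpt_polled_action((union ccb *)&csio);
	return (csio.ccb_h.status & CAM_STATUS_MASK);
}
#endif /* notusedyet */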
 3474         
 3475 /*
 3476  * Schedule a peripheral driver to receive a ccb when its
 3477  * target device has space for more transactions.
 3478  */
 3479 void
 3480 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 3481 {
 3482         struct cam_ed *device;
 3483         int s;
 3484         int runq;
 3485 
 3486         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3487         device = perph->path->device;
 3488         s = splsoftcam();
 3489         if (periph_is_queued(perph)) {
 3490                 /* Simply reorder based on new priority */
 3491                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3492                           ("   change priority to %d\n", new_priority));
 3493                 if (new_priority < perph->pinfo.priority) {
 3494                         camq_change_priority(&device->drvq,
 3495                                              perph->pinfo.index,
 3496                                              new_priority);
 3497                 }
 3498                 runq = 0;
 3499         } else {
 3500                 /* New entry on the queue */
 3501                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3502                           ("   added periph to queue\n"));
 3503                 perph->pinfo.priority = new_priority;
 3504                 perph->pinfo.generation = ++device->drvq.generation;
 3505                 camq_insert(&device->drvq, &perph->pinfo);
 3506                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3507         }
 3508         splx(s);
 3509         if (runq != 0) {
 3510                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3511                           ("   calling xpt_run_devq\n"));
 3512                 xpt_run_dev_allocq(perph->path->bus);
 3513         }
 3514 }
 3515 
 3516 
 3517 /*
 3518  * Schedule a device to run on a given queue.
 3519  * If the device was inserted as a new entry on the queue,
 3520  * return 1 meaning the device queue should be run. If we
 3521  * were already queued, implying someone else has already
 3522  * started the queue, return 0 so the caller doesn't attempt
 3523  * to run the queue.  Must be run at splsoftcam or splcam
 3524  * (splcam encompasses splsoftcam).
 3525  */
 3526 static int
 3527 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3528                  u_int32_t new_priority)
 3529 {
 3530         int retval;
 3531         u_int32_t old_priority;
 3532 
 3533         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3534 
 3535         old_priority = pinfo->priority;
 3536 
 3537         /*
 3538          * Are we already queued?
 3539          */
 3540         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3541                 /* Simply reorder based on new priority */
 3542                 if (new_priority < old_priority) {
 3543                         camq_change_priority(queue, pinfo->index,
 3544                                              new_priority);
 3545                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3546                                         ("changed priority to %d\n",
 3547                                          new_priority));
 3548                 }
 3549                 retval = 0;
 3550         } else {
 3551                 /* New entry on the queue */
 3552                 if (new_priority < old_priority)
 3553                         pinfo->priority = new_priority;
 3554 
 3555                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3556                                 ("Inserting onto queue\n"));
 3557                 pinfo->generation = ++queue->generation;
 3558                 camq_insert(queue, pinfo);
 3559                 retval = 1;
 3560         }
 3561         return (retval);
 3562 }
 3563 
 3564 static void
 3565 xpt_run_dev_allocq(struct cam_eb *bus)
 3566 {
 3567         struct  cam_devq *devq;
 3568         int     s;
 3569 
 3570         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3571         devq = bus->sim->devq;
 3572 
 3573         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3574                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3575                          "openings == %d, active == %d\n",
 3576                          devq->alloc_queue.qfrozen_cnt,
 3577                          devq->alloc_queue.entries,
 3578                          devq->alloc_openings,
 3579                          devq->alloc_active));
 3580 
 3581         s = splsoftcam();
 3582         devq->alloc_queue.qfrozen_cnt++;
 3583         while ((devq->alloc_queue.entries > 0)
 3584             && (devq->alloc_openings > 0)
 3585             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
 3586                 struct  cam_ed_qinfo *qinfo;
 3587                 struct  cam_ed *device;
 3588                 union   ccb *work_ccb;
 3589                 struct  cam_periph *drv;
 3590                 struct  camq *drvq;
 3591                 
 3592                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3593                                                            CAMQ_HEAD);
 3594                 device = qinfo->device;
 3595 
 3596                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3597                                 ("running device %p\n", device));
 3598 
 3599                 drvq = &device->drvq;
 3600 
 3601 #ifdef CAMDEBUG
 3602                 if (drvq->entries <= 0) {
 3603                         panic("xpt_run_dev_allocq: "
 3604                               "Device on queue without any work to do");
 3605                 }
 3606 #endif
 3607                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3608                         devq->alloc_openings--;
 3609                         devq->alloc_active++;
 3610                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3611                         splx(s);
 3612                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3613                                       drv->pinfo.priority);
 3614                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3615                                         ("calling periph start\n"));
 3616                         drv->periph_start(drv, work_ccb);
 3617                 } else {
 3618                         /*
 3619                          * Malloc failure in alloc_ccb
 3620                          */
 3621                         /*
 3622                          * XXX add us to a list to be run from free_ccb
 3623                          * if we don't have any ccbs active on this
 3624                          * device queue otherwise we may never get run
 3625                          * again.
 3626                          */
 3627                         break;
 3628                 }
 3629         
 3630                 /* Raise IPL for possible insertion and test at top of loop */
 3631                 s = splsoftcam();
 3632 
 3633                 if (drvq->entries > 0) {
 3634                         /* We have more work.  Attempt to reschedule */
 3635                         xpt_schedule_dev_allocq(bus, device);
 3636                 }
 3637         }
 3638         devq->alloc_queue.qfrozen_cnt--;
 3639         splx(s);
 3640 }
 3641 
 3642 static void
 3643 xpt_run_dev_sendq(struct cam_eb *bus)
 3644 {
 3645         struct  cam_devq *devq;
 3646         int     s;
 3647 
 3648         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3649         
 3650         devq = bus->sim->devq;
 3651 
 3652         s = splcam();
 3653         devq->send_queue.qfrozen_cnt++;
 3654         splx(s);
 3655         s = splsoftcam();
 3656         while ((devq->send_queue.entries > 0)
 3657             && (devq->send_openings > 0)) {
 3658                 struct  cam_ed_qinfo *qinfo;
 3659                 struct  cam_ed *device;
 3660                 union ccb *work_ccb;
 3661                 struct  cam_sim *sim;
 3662                 int     ospl;
 3663 
 3664                 ospl = splcam();
 3665                 if (devq->send_queue.qfrozen_cnt > 1) {
 3666                         splx(ospl);
 3667                         break;
 3668                 }
 3669 
 3670                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3671                                                            CAMQ_HEAD);
 3672                 device = qinfo->device;
 3673 
 3674                 /*
 3675                  * If the device has been "frozen", don't attempt
 3676                  * to run it.
 3677                  */
 3678                 if (device->qfrozen_cnt > 0) {
 3679                         splx(ospl);
 3680                         continue;
 3681                 }
 3682 
 3683                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3684                                 ("running device %p\n", device));
 3685 
 3686                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3687                 if (work_ccb == NULL) {
 3688                         printf("device on run queue with no ccbs???\n");
 3689                         splx(ospl);
 3690                         continue;
 3691                 }
 3692 
 3693                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3694 
 3695                         if (num_highpower <= 0) {
 3696                                 /*
 3697                                  * We got a high power command, but we
 3698                                  * don't have any available slots.  Freeze
 3699                                  * the device queue until we have a slot
 3700                                  * available.
 3701                                  */
 3702                                 device->qfrozen_cnt++;
 3703                                 STAILQ_INSERT_TAIL(&highpowerq, 
 3704                                                    &work_ccb->ccb_h, 
 3705                                                    xpt_links.stqe);
 3706 
 3707                                 splx(ospl);
 3708                                 continue;
 3709                         } else {
 3710                                 /*
 3711                                  * Consume a high power slot while
 3712                                  * this ccb runs.
 3713                                  */
 3714                                 num_highpower--;
 3715                         }
 3716                 }
 3717                 devq->active_dev = device;
 3718                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3719 
 3720                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3721                 splx(ospl);
 3722 
 3723                 devq->send_openings--;
 3724                 devq->send_active++;            
 3725                 
 3726                 if (device->ccbq.queue.entries > 0)
 3727                         xpt_schedule_dev_sendq(bus, device);
 3728 
 3729                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
 3730                         /*
 3731                          * The client wants to freeze the queue
 3732                          * after this CCB is sent.
 3733                          */
 3734                         ospl = splcam();
 3735                         device->qfrozen_cnt++;
 3736                         splx(ospl);
 3737                 }
 3738                 
 3739                 splx(s);
 3740 
 3741                 /* In Target mode, the peripheral driver knows best... */
 3742                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3743                         if ((device->inq_flags & SID_CmdQue) != 0
 3744                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3745                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3746                         else
 3747                                 /*
 3748                                  * Clear this in case of a retried CCB that
 3749                                  * failed due to a rejected tag.
 3750                                  */
 3751                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3752                 }
 3753 
 3754                 /*
 3755                  * Device queues can be shared among multiple sim instances
 3756                  * that reside on different busses.  Use the SIM in the queue
 3757                  * CCB's path, rather than the one in the bus that was passed
 3758                  * into this function.
 3759                  */
 3760                 sim = work_ccb->ccb_h.path->bus->sim;
 3761                 (*(sim->sim_action))(sim, work_ccb);
 3762 
 3763                 ospl = splcam();
 3764                 devq->active_dev = NULL;
 3765                 splx(ospl);
 3766                 /* Raise IPL for possible insertion and test at top of loop */
 3767                 s = splsoftcam();
 3768         }
 3769         splx(s);
 3770         s = splcam();
 3771         devq->send_queue.qfrozen_cnt--;
 3772         splx(s);
 3773 }
 3774 
 3775 /*
 3776  * This function merges fields from the slave ccb into the master ccb,
 3777  * while keeping important header fields in the master ccb constant.
 3778  */
 3779 void
 3780 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3781 {
 3782         /*
 3783          * Pull fields that are valid for peripheral drivers to set
 3784          * into the master CCB along with the CCB "payload".
 3785          */
 3786         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3787         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3788         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3789         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
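        /*
         * Editorial note: &(&ccb->ccb_h)[1] points just past the header,
         * so the bcopy below copies the CCB payload while leaving the
         * header fields merged above intact.
         */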
 3790         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3791               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3792 }
 3793 
 3794 void
 3795 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3796 {
 3797         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3798         ccb_h->pinfo.priority = priority;
 3799         ccb_h->path = path;
 3800         ccb_h->path_id = path->bus->path_id;
 3801         if (path->target)
 3802                 ccb_h->target_id = path->target->target_id;
 3803         else
 3804                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3805         if (path->device) {
 3806                 ccb_h->target_lun = path->device->lun_id;
 3807                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3808         } else {
 3809                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3810         }
 3811         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3812         ccb_h->flags = 0;
 3813 }
 3814 
 3815 /* Path manipulation functions */
 3816 cam_status
 3817 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3818                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3819 {
 3820         struct     cam_path *path;
 3821         cam_status status;
 3822 
 3823         path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
 3824 
 3825         if (path == NULL) {
 3826                 status = CAM_RESRC_UNAVAIL;
 3827                 return(status);
 3828         }
 3829         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3830         if (status != CAM_REQ_CMP) {
 3831                 free(path, M_DEVBUF);
 3832                 path = NULL;
 3833         }
 3834         *new_path_ptr = path;
 3835         return (status);
 3836 }
 3837 
 3838 static cam_status
 3839 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 3840                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3841 {
 3842         struct       cam_eb *bus;
 3843         struct       cam_et *target;
 3844         struct       cam_ed *device;
 3845         cam_status   status;
 3846         int          s;
 3847 
 3848         status = CAM_REQ_CMP;   /* Completed without error */
 3849         target = NULL;          /* Wildcarded */
 3850         device = NULL;          /* Wildcarded */
 3851 
 3852         /*
 3853          * We will potentially modify the EDT, so block interrupts
 3854          * that may attempt to create cam paths.
 3855          */
 3856         s = splcam();
 3857         bus = xpt_find_bus(path_id);
 3858         if (bus == NULL) {
 3859                 status = CAM_PATH_INVALID;
 3860         } else {
 3861                 target = xpt_find_target(bus, target_id);
 3862                 if (target == NULL) {
 3863                         /* Create one */
 3864                         struct cam_et *new_target;
 3865 
 3866                         new_target = xpt_alloc_target(bus, target_id);
 3867                         if (new_target == NULL) {
 3868                                 status = CAM_RESRC_UNAVAIL;
 3869                         } else {
 3870                                 target = new_target;
 3871                         }
 3872                 }
 3873                 if (target != NULL) {
 3874                         device = xpt_find_device(target, lun_id);
 3875                         if (device == NULL) {
 3876                                 /* Create one */
 3877                                 struct cam_ed *new_device;
 3878 
 3879                                 new_device = xpt_alloc_device(bus,
 3880                                                               target,
 3881                                                               lun_id);
 3882                                 if (new_device == NULL) {
 3883                                         status = CAM_RESRC_UNAVAIL;
 3884                                 } else {
 3885                                         device = new_device;
 3886                                 }
 3887                         }
 3888                 }
 3889         }
 3890         splx(s);
 3891 
 3892         /*
 3893          * Only touch the user's data if we are successful.
 3894          */
 3895         if (status == CAM_REQ_CMP) {
 3896                 new_path->periph = perph;
 3897                 new_path->bus = bus;
 3898                 new_path->target = target;
 3899                 new_path->device = device;
 3900                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 3901         } else {
 3902                 if (device != NULL)
 3903                         xpt_release_device(bus, target, device);
 3904                 if (target != NULL)
 3905                         xpt_release_target(bus, target);
 3906                 if (bus != NULL)
 3907                         xpt_release_bus(bus);
 3908         }
 3909         return (status);
 3910 }
 3911 
 3912 static void
 3913 xpt_release_path(struct cam_path *path)
 3914 {
 3915         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 3916         if (path->device != NULL) {
 3917                 xpt_release_device(path->bus, path->target, path->device);
 3918                 path->device = NULL;
 3919         }
 3920         if (path->target != NULL) {
 3921                 xpt_release_target(path->bus, path->target);
 3922                 path->target = NULL;
 3923         }
 3924         if (path->bus != NULL) {
 3925                 xpt_release_bus(path->bus);
 3926                 path->bus = NULL;
 3927         }
 3928 }
 3929 
 3930 void
 3931 xpt_free_path(struct cam_path *path)
 3932 {
 3933         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 3934         xpt_release_path(path);
 3935         free(path, M_DEVBUF);
 3936 }
 3937 
 3938 
 3939 /*
 3940  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 3941  * in path1, 2 for match with wildcards in path2.
 3942  */
 3943 int
 3944 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 3945 {
 3946         int retval = 0;
 3947 
 3948         if (path1->bus != path2->bus) {
 3949                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 3950                         retval = 1;
 3951                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 3952                         retval = 2;
 3953                 else
 3954                         return (-1);
 3955         }
 3956         if (path1->target != path2->target) {
 3957                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 3958                         if (retval == 0)
 3959                                 retval = 1;
 3960                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 3961                         retval = 2;
 3962                 else
 3963                         return (-1);
 3964         }
 3965         if (path1->device != path2->device) {
 3966                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 3967                         if (retval == 0)
 3968                                 retval = 1;
 3969                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 3970                         retval = 2;
 3971                 else
 3972                         return (-1);
 3973         }
 3974         return (retval);
 3975 }
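
/*
 * For example, comparing a fully specified path against one wildcarded at
 * the target and lun returns 2 when the wildcards are in path2 and 1 when
 * they are in path1; paths on different, non-wildcard busses return -1.
 */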
 3976 
 3977 void
 3978 xpt_print_path(struct cam_path *path)
 3979 {
 3980         if (path == NULL)
 3981                 printf("(nopath): ");
 3982         else {
 3983                 if (path->periph != NULL)
 3984                         printf("(%s%d:", path->periph->periph_name,
 3985                                path->periph->unit_number);
 3986                 else
 3987                         printf("(noperiph:");
 3988 
 3989                 if (path->bus != NULL)
 3990                         printf("%s%d:%d:", path->bus->sim->sim_name,
 3991                                path->bus->sim->unit_number,
 3992                                path->bus->sim->bus_id);
 3993                 else
 3994                         printf("nobus:");
 3995 
 3996                 if (path->target != NULL)
 3997                         printf("%d:", path->target->target_id);
 3998                 else
 3999                         printf("X:");
 4000 
 4001                 if (path->device != NULL)
 4002                         printf("%d): ", path->device->lun_id);
 4003                 else
 4004                         printf("X): ");
 4005         }
 4006 }
 4007 
 4008 void
 4009 xpt_print(struct cam_path *path, const char *fmt, ...)
 4010 {
 4011         va_list ap;
 4012         xpt_print_path(path);
 4013         va_start(ap, fmt);
 4014         vprintf(fmt, ap);
 4015         va_end(ap);
 4016 }
 4017 
 4018 path_id_t
 4019 xpt_path_path_id(struct cam_path *path)
 4020 {
 4021         return(path->bus->path_id);
 4022 }
 4023 
 4024 target_id_t
 4025 xpt_path_target_id(struct cam_path *path)
 4026 {
 4027         if (path->target != NULL)
 4028                 return (path->target->target_id);
 4029         else
 4030                 return (CAM_TARGET_WILDCARD);
 4031 }
 4032 
 4033 lun_id_t
 4034 xpt_path_lun_id(struct cam_path *path)
 4035 {
 4036         if (path->device != NULL)
 4037                 return (path->device->lun_id);
 4038         else
 4039                 return (CAM_LUN_WILDCARD);
 4040 }
 4041 
 4042 struct cam_sim *
 4043 xpt_path_sim(struct cam_path *path)
 4044 {
 4045         return (path->bus->sim);
 4046 }
 4047 
 4048 struct cam_periph*
 4049 xpt_path_periph(struct cam_path *path)
 4050 {
 4051         return (path->periph);
 4052 }
 4053 
 4054 /*
 4055  * Release a CAM control block for the caller.  Remit the cost of the structure
 4056  * to the device referenced by the path.  If this device had no 'credits'
 4057  * and peripheral drivers have registered async callbacks for this
 4058  * notification, call them now.
 4059  */
 4060 void
 4061 xpt_release_ccb(union ccb *free_ccb)
 4062 {
 4063         int      s;
 4064         struct   cam_path *path;
 4065         struct   cam_ed *device;
 4066         struct   cam_eb *bus;
 4067 
 4068         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 4069         path = free_ccb->ccb_h.path;
 4070         device = path->device;
 4071         bus = path->bus;
 4072         s = splsoftcam();
 4073         cam_ccbq_release_opening(&device->ccbq);
 4074         if (xpt_ccb_count > xpt_max_ccbs) {
 4075                 xpt_free_ccb(free_ccb);
 4076                 xpt_ccb_count--;
 4077         } else {
 4078                 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
 4079         }
 4080         bus->sim->devq->alloc_openings++;
 4081         bus->sim->devq->alloc_active--;
 4082         /* XXX Turn this into an inline function - xpt_run_device?? */
 4083         if ((device_is_alloc_queued(device) == 0)
 4084          && (device->drvq.entries > 0)) {
 4085                 xpt_schedule_dev_allocq(bus, device);
 4086         }
 4087         splx(s);
 4088         if (dev_allocq_is_runnable(bus->sim->devq))
 4089                 xpt_run_dev_allocq(bus);
 4090 }
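
/*
 * Illustrative sketch: peripheral drivers normally obtain a CCB via
 * cam_periph_getccb() (see cam_periph.c) and return it here from their
 * completion routine.  "exampledone" is a hypothetical name:
 *
 *      static void
 *      exampledone(struct cam_periph *periph, union ccb *done_ccb)
 *      {
 *              ... examine done_ccb->ccb_h.status ...
 *              xpt_release_ccb(done_ccb);
 *      }
 */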
 4091 
 4092 /* Functions accessed by SIM drivers */
 4093 
 4094 /*
 4095  * A sim structure, listing the SIM entry points and instance
 4096  * identification info, is passed to xpt_bus_register to hook the SIM
 4097  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 4098  * for this new bus, places it in the list of busses, and assigns
 4099  * it a path_id.  The path_id may be influenced by "hard wiring"
 4100  * information specified by the user.  Once interrupt services are
 4101  * available, the bus will be probed.
 4102  */
 4103 int32_t
 4104 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
 4105 {
 4106         struct cam_eb *new_bus;
 4107         struct cam_eb *old_bus;
 4108         struct ccb_pathinq cpi;
 4109         int s;
 4110 
 4111         sim->bus_id = bus;
 4112         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 4113                                           M_DEVBUF, M_NOWAIT);
 4114         if (new_bus == NULL) {
 4115                 /* Couldn't satisfy request */
 4116                 return (CAM_RESRC_UNAVAIL);
 4117         }
 4118 
 4119         if (strcmp(sim->sim_name, "xpt") != 0) {
 4120 
 4121                 sim->path_id =
 4122                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 4123         }
 4124 
 4125         TAILQ_INIT(&new_bus->et_entries);
 4126         new_bus->path_id = sim->path_id;
 4127         new_bus->sim = sim;
 4128         timevalclear(&new_bus->last_reset);
 4129         new_bus->flags = 0;
 4130         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 4131         new_bus->generation = 0;
 4132         s = splcam();
 4133         old_bus = TAILQ_FIRST(&xpt_busses);
 4134         while (old_bus != NULL
 4135             && old_bus->path_id < new_bus->path_id)
 4136                 old_bus = TAILQ_NEXT(old_bus, links);
 4137         if (old_bus != NULL)
 4138                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 4139         else
 4140                 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
 4141         bus_generation++;
 4142         splx(s);
 4143 
 4144         /* Notify interested parties */
 4145         if (sim->path_id != CAM_XPT_PATH_ID) {
 4146                 struct cam_path path;
 4147 
 4148                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
 4149                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4150                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 4151                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 4152                 xpt_action((union ccb *)&cpi);
 4153                 xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi);
 4154                 xpt_release_path(&path);
 4155         }
 4156         return (CAM_SUCCESS);
 4157 }
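
/*
 * Sketch of how an HBA driver's attach routine hooks into the XPT.  The
 * names and queue sizes are illustrative; see cam_sim.h for the exact
 * cam_simq_alloc/cam_sim_alloc interfaces:
 *
 *      struct cam_devq *devq;
 *      struct cam_sim *sim;
 *
 *      devq = cam_simq_alloc(MAX_TRANSACTIONS);
 *      sim = cam_sim_alloc(exampleaction, examplepoll, "example", softc,
 *                          unit, 1, MAX_TAGGED, devq);
 *      if (xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *              ... fail the attach ...
 */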
 4158 
 4159 int32_t
 4160 xpt_bus_deregister(path_id_t pathid)
 4161 {
 4162         struct cam_path bus_path;
 4163         cam_status status;
 4164 
 4165         status = xpt_compile_path(&bus_path, NULL, pathid,
 4166                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4167         if (status != CAM_REQ_CMP)
 4168                 return (status);
 4169 
 4170         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 4171         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 4172         
 4173         /* Release the reference count held while registered. */
 4174         xpt_release_bus(bus_path.bus);
 4175         xpt_release_path(&bus_path);
 4176 
 4177         return (CAM_REQ_CMP);
 4178 }
 4179 
 4180 static path_id_t
 4181 xptnextfreepathid(void)
 4182 {
 4183         struct cam_eb *bus;
 4184         path_id_t pathid;
 4185         char *strval;
 4186 
 4187         pathid = 0;
 4188         bus = TAILQ_FIRST(&xpt_busses);
 4189 retry:
 4190         /* Find an unoccupied pathid */
 4191         while (bus != NULL
 4192             && bus->path_id <= pathid) {
 4193                 if (bus->path_id == pathid)
 4194                         pathid++;
 4195                 bus = TAILQ_NEXT(bus, links);
 4196         }
 4197 
 4198         /*
 4199          * Ensure that this pathid is not reserved for
 4200          * a bus that may be registered in the future.
 4201          */
 4202         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 4203                 ++pathid;
 4204                 /* Start the search over */
 4205                 goto retry;
 4206         }
 4207         return (pathid);
 4208 }
 4209 
 4210 static path_id_t
 4211 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 4212 {
 4213         path_id_t pathid;
 4214         int i, dunit, val;
 4215         char buf[32], *strval;
 4216 
 4217         pathid = CAM_XPT_PATH_ID;
 4218         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 4219         i = -1;
 4220         while ((i = resource_locate(i, "scbus")) != -1) {
 4221                 dunit = resource_query_unit(i);
 4222                 if (dunit < 0)          /* unwired?! */
 4223                         continue;
 4224                 if (resource_string_value("scbus", dunit, "at", &strval) != 0)
 4225                         continue;
 4226                 if (strcmp(buf, strval) != 0)
 4227                         continue;
 4228                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 4229                         if (sim_bus == val) {
 4230                                 pathid = dunit;
 4231                                 break;
 4232                         }
 4233                 } else if (sim_bus == 0) {
 4234                         /* Unspecified matches bus 0 */
 4235                         pathid = dunit;
 4236                         break;
 4237                 } else {
 4238                         printf("Ambiguous scbus configuration for %s%d "
 4239                                "bus %d, cannot wire down.  The kernel "
 4240                                "config entry for scbus%d should "
 4241                                "specify a controller bus.\n"
 4242                                "Scbus will be assigned dynamically.\n",
 4243                                sim_name, sim_unit, sim_bus, dunit);
 4244                         break;
 4245                 }
 4246         }
 4247 
 4248         if (pathid == CAM_XPT_PATH_ID)
 4249                 pathid = xptnextfreepathid();
 4250         return (pathid);
 4251 }
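
/*
 * For example, a kernel config wire-down along the lines of
 *
 *      controller scbus0 at ahc0 bus 0
 *
 * is seen by the resource_*_value() lookups above as scbus unit 0 with
 * at="ahc0" and bus=0, pinning ahc0's first bus to path id 0.  (The exact
 * config syntax varies by release.)
 */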
 4252 
 4253 void
 4254 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 4255 {
 4256         struct cam_eb *bus;
 4257         struct cam_et *target, *next_target;
 4258         struct cam_ed *device, *next_device;
 4259         int s;
 4260 
 4261         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
 4262 
 4263         /*
 4264          * Most async events come from a CAM interrupt context.  In
 4265          * a few cases, the error recovery code at the peripheral layer,
 4266          * which may run from our SWI or a process context, may signal
 4267          * deferred events with a call to xpt_async. Ensure async
 4268          * notifications are serialized by blocking cam interrupts.
 4269          */
 4270         s = splcam();
 4271 
 4272         bus = path->bus;
 4273 
 4274         if (async_code == AC_BUS_RESET) { 
 4275                 int s;
 4276 
 4277                 s = splclock();
 4278                 /* Update our notion of when the last reset occurred */
 4279                 microtime(&bus->last_reset);
 4280                 splx(s);
 4281         }
 4282 
 4283         for (target = TAILQ_FIRST(&bus->et_entries);
 4284              target != NULL;
 4285              target = next_target) {
 4286 
 4287                 next_target = TAILQ_NEXT(target, links);
 4288 
 4289                 if (path->target != target
 4290                  && path->target->target_id != CAM_TARGET_WILDCARD
 4291                  && target->target_id != CAM_TARGET_WILDCARD)
 4292                         continue;
 4293 
 4294                 if (async_code == AC_SENT_BDR) {
 4295                         int s;
 4296 
 4297                         /* Update our notion of when the last reset occurred */
 4298                         s = splclock();
 4299                         microtime(&path->target->last_reset);
 4300                         splx(s);
 4301                 }
 4302 
 4303                 for (device = TAILQ_FIRST(&target->ed_entries);
 4304                      device != NULL;
 4305                      device = next_device) {
 4306 
 4307                         next_device = TAILQ_NEXT(device, links);
 4308 
 4309                         if (path->device != device 
 4310                          && path->device->lun_id != CAM_LUN_WILDCARD
 4311                          && device->lun_id != CAM_LUN_WILDCARD)
 4312                                 continue;
 4313 
 4314                         xpt_dev_async(async_code, bus, target,
 4315                                       device, async_arg);
 4316 
 4317                         xpt_async_bcast(&device->asyncs, async_code,
 4318                                         path, async_arg);
 4319                 }
 4320         }
 4321         
 4322         /*
 4323          * If this wasn't a fully wildcarded async, tell all
 4324          * clients that want all async events.
 4325          */
 4326         if (bus != xpt_periph->path->bus)
 4327                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 4328                                 path, async_arg);
 4329         splx(s);
 4330 }
 4331 
 4332 static void
 4333 xpt_async_bcast(struct async_list *async_head,
 4334                 u_int32_t async_code,
 4335                 struct cam_path *path, void *async_arg)
 4336 {
 4337         struct async_node *cur_entry;
 4338 
 4339         cur_entry = SLIST_FIRST(async_head);
 4340         while (cur_entry != NULL) {
 4341                 struct async_node *next_entry;
 4342                 /*
 4343                  * Grab the next list entry before we call the current
 4344                  * entry's callback.  This is because the callback function
 4345                  * can delete its async callback entry.
 4346                  */
 4347                 next_entry = SLIST_NEXT(cur_entry, links);
 4348                 if ((cur_entry->event_enable & async_code) != 0)
 4349                         cur_entry->callback(cur_entry->callback_arg,
 4350                                             async_code, path,
 4351                                             async_arg);
 4352                 cur_entry = next_entry;
 4353         }
 4354 }
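
/*
 * Sketch of how a client lands on one of these async lists: it fills in a
 * ccb_setasync and pushes it through xpt_action with XPT_SASYNC_CB, much
 * as the peripheral drivers do at attach time.  The event mask, priority,
 * and callback names here are illustrative:
 *
 *      struct ccb_setasync csa;
 *
 *      xpt_setup_ccb(&csa.ccb_h, path, 5);
 *      csa.ccb_h.func_code = XPT_SASYNC_CB;
 *      csa.event_enable = AC_LOST_DEVICE | AC_SENT_BDR;
 *      csa.callback = examplecallback;
 *      csa.callback_arg = softc;
 *      xpt_action((union ccb *)&csa);
 */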
 4355 
 4356 /*
 4357  * Handle any per-device event notifications that require action by the XPT.
 4358  */
 4359 static void
 4360 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
 4361               struct cam_ed *device, void *async_arg)
 4362 {
 4363         cam_status status;
 4364         struct cam_path newpath;
 4365 
 4366         /*
 4367          * We only need to handle events for real devices.
 4368          */
 4369         if (target->target_id == CAM_TARGET_WILDCARD
 4370          || device->lun_id == CAM_LUN_WILDCARD)
 4371                 return;
 4372 
 4373         /*
 4374          * We need our own path with wildcards expanded to
 4375          * handle certain types of events.
 4376          */
 4377         if ((async_code == AC_SENT_BDR)
 4378          || (async_code == AC_BUS_RESET)
 4379          || (async_code == AC_INQ_CHANGED))
 4380                 status = xpt_compile_path(&newpath, NULL,
 4381                                           bus->path_id,
 4382                                           target->target_id,
 4383                                           device->lun_id);
 4384         else
 4385                 status = CAM_REQ_CMP_ERR;
 4386 
 4387         if (status == CAM_REQ_CMP) {
 4388 
 4389                 /*
 4390                  * Allow transfer negotiation to occur in a
 4391                  * tag free environment.
 4392                  */
 4393                 if (async_code == AC_SENT_BDR
 4394                  || async_code == AC_BUS_RESET)
 4395                         xpt_toggle_tags(&newpath);
 4396 
 4397                 if (async_code == AC_INQ_CHANGED) {
 4398                         /*
 4399                          * We've sent a start unit command, or
 4400                          * something similar, to a device that
 4401                          * may have caused its inquiry data to
 4402                          * change, so we re-scan the device to
 4403                          * refresh its inquiry data.
 4404                          */
 4405                         xpt_scan_lun(newpath.periph, &newpath,
 4406                                      CAM_EXPECT_INQ_CHANGE, NULL);
 4407                 }
 4408                 xpt_release_path(&newpath);
 4409         } else if (async_code == AC_LOST_DEVICE) {
 4410                 device->flags |= CAM_DEV_UNCONFIGURED;
 4411         } else if (async_code == AC_TRANSFER_NEG) {
 4412                 struct ccb_trans_settings *settings;
 4413 
 4414                 settings = (struct ccb_trans_settings *)async_arg;
 4415                 xpt_set_transfer_settings(settings, device,
 4416                                           /*async_update*/TRUE);
 4417         }
 4418 }
 4419 
 4420 u_int32_t
 4421 xpt_freeze_devq(struct cam_path *path, u_int count)
 4422 {
 4423         int s;
 4424         struct ccb_hdr *ccbh;
 4425 
 4426         s = splcam();
 4427         path->device->qfrozen_cnt += count;
 4428 
 4429         /*
 4430          * Mark the last CCB in the queue as needing
 4431          * to be requeued if the driver hasn't
 4432          * changed its state yet.  This fixes a race
 4433          * where a ccb is just about to be queued to
 4434          * a controller driver when its interrupt routine
 4435          * freezes the queue.  To completely close the
 4436          * hole, controller drivers must check to see
 4437          * if a ccb's status is still CAM_REQ_INPROG
 4438          * under spl protection just before they queue
 4439          * the CCB.  See ahc_action/ahc_freeze_devq for
 4440          * an example.
 4441          */
 4442         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
 4443         if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4444                 ccbh->status = CAM_REQUEUE_REQ;
 4445         splx(s);
 4446         return (path->device->qfrozen_cnt);
 4447 }
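
/*
 * Sketch of the check described above, as a controller driver might
 * perform it just before handing a CCB to the hardware (illustrative
 * only; see ahc_action for the real thing):
 *
 *      s = splcam();
 *      if (ccb->ccb_h.status != CAM_REQ_INPROG) {
 *              splx(s);
 *              xpt_done(ccb);
 *              return;
 *      }
 *      ... queue the CCB to the controller ...
 *      splx(s);
 */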
 4448 
 4449 u_int32_t
 4450 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4451 {
 4452         sim->devq->send_queue.qfrozen_cnt += count;
 4453         if (sim->devq->active_dev != NULL) {
 4454                 struct ccb_hdr *ccbh;
 4455                 
 4456                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
 4457                                   ccb_hdr_tailq);
 4458                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4459                         ccbh->status = CAM_REQUEUE_REQ;
 4460         }
 4461         return (sim->devq->send_queue.qfrozen_cnt);
 4462 }
 4463 
 4464 static void
 4465 xpt_release_devq_timeout(void *arg)
 4466 {
 4467         struct cam_ed *device;
 4468 
 4469         device = (struct cam_ed *)arg;
 4470 
 4471         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
 4472 }
 4473 
 4474 void
 4475 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4476 {
 4477         xpt_release_devq_device(path->device, count, run_queue);
 4478 }
 4479 
 4480 static void
 4481 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
 4482 {
 4483         int     rundevq;
 4484         int     s0, s1;
 4485 
 4486         rundevq = 0;
 4487         s0 = splsoftcam();
 4488         s1 = splcam();
 4489         if (dev->qfrozen_cnt > 0) {
 4490 
 4491                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
 4492                 dev->qfrozen_cnt -= count;
 4493                 if (dev->qfrozen_cnt == 0) {
 4494 
 4495                         /*
 4496                          * No longer need to wait for a successful
 4497                          * command completion.
 4498                          */
 4499                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4500 
 4501                         /*
 4502                          * Remove any timeouts that might be scheduled
 4503                          * to release this queue.
 4504                          */
 4505                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4506                                 untimeout(xpt_release_devq_timeout, dev,
 4507                                           dev->c_handle);
 4508                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4509                         }
 4510 
 4511                         /*
 4512                          * Now that we are unfrozen schedule the
 4513                          * device so any pending transactions are
 4514                          * run.
 4515                          */
 4516                         if ((dev->ccbq.queue.entries > 0)
 4517                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4518                          && (run_queue != 0)) {
 4519                                 rundevq = 1;
 4520                         }
 4521                 }
 4522         }
 4523         splx(s1);
 4524         if (rundevq != 0)
 4525                 xpt_run_dev_sendq(dev->target->bus);
 4526         splx(s0);
 4527 }
 4528 
 4529 void
 4530 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4531 {
 4532         int     s;
 4533         struct  camq *sendq;
 4534 
 4535         sendq = &(sim->devq->send_queue);
 4536         s = splcam();
 4537         if (sendq->qfrozen_cnt > 0) {
 4538 
 4539                 sendq->qfrozen_cnt--;
 4540                 if (sendq->qfrozen_cnt == 0) {
 4541                         struct cam_eb *bus;
 4542 
 4543                         /*
 4544                          * If there is a timeout scheduled to release this
 4545                          * sim queue, remove it.  The queue frozen count is
 4546                          * already at 0.
 4547                          */
 4548                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4549                                 untimeout(xpt_release_simq_timeout, sim,
 4550                                           sim->c_handle);
 4551                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4552                         }
 4553                         bus = xpt_find_bus(sim->path_id);
 4554                         splx(s);
 4555 
 4556                         if (run_queue) {
 4557                                 /*
 4558                                  * Now that we are unfrozen run the send queue.
 4559                                  */
 4560                                 xpt_run_dev_sendq(bus);
 4561                         }
 4562                         xpt_release_bus(bus);
 4563                 } else
 4564                         splx(s);
 4565         } else
 4566                 splx(s);
 4567 }
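
/*
 * Freeze/release sketch: a SIM that temporarily runs out of controller
 * resources can hold off the transport and resume later:
 *
 *      xpt_freeze_simq(sim, 1);
 *      ... wait for resources to free up ...
 *      xpt_release_simq(sim, TRUE);
 *
 * Passing TRUE for run_queue restarts the send queue immediately, as the
 * code above shows.
 */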
 4568 
 4569 static void
 4570 xpt_release_simq_timeout(void *arg)
 4571 {
 4572         struct cam_sim *sim;
 4573 
 4574         sim = (struct cam_sim *)arg;
 4575         xpt_release_simq(sim, /* run_queue */ TRUE);
 4576 }
 4577 
 4578 void
 4579 xpt_done(union ccb *done_ccb)
 4580 {
 4581         int s;
 4582 
 4583         s = splcam();
 4584 
 4585         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4586         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4587                 /*
 4588                  * Queue up the request for handling by our SWI handler.
 4589                  * This is done for any of the "non-immediate" types of ccbs.
 4590                  */
 4591                 switch (done_ccb->ccb_h.path->periph->type) {
 4592                 case CAM_PERIPH_BIO:
 4593                         TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
 4594                                           sim_links.tqe);
 4595                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4596                         setsoftcambio();
 4597                         break;
 4598                 case CAM_PERIPH_NET:
 4599                         TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
 4600                                           sim_links.tqe);
 4601                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4602                         setsoftcamnet();
 4603                         break;
 4604                 }
 4605         }
 4606         splx(s);
 4607 }
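
/*
 * Illustrative completion from a SIM: the driver records final status in
 * the CCB and hands it back to the XPT:
 *
 *      ccb->ccb_h.status = CAM_REQ_CMP;
 *      xpt_done(ccb);
 */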
 4608 
 4609 union ccb *
 4610 xpt_alloc_ccb(void)
 4611 {
 4612         union ccb *new_ccb;
 4613 
 4614         new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
 4615         return (new_ccb);
 4616 }
 4617 
 4618 void
 4619 xpt_free_ccb(union ccb *free_ccb)
 4620 {
 4621         free(free_ccb, M_DEVBUF);
 4622 }
 4623 
 4624 
 4625 
 4626 /* Private XPT functions */
 4627 
 4628 /*
 4629  * Get a CAM control block for the caller. Charge the structure to the device
 4630  * referenced by the path.  If this device has no 'credits' then the
 4631  * device already has the maximum number of outstanding operations under way
 4632  * and we return NULL. If we don't have sufficient resources to allocate more
 4633  * ccbs, we also return NULL.
 4634  */
 4635 static union ccb *
 4636 xpt_get_ccb(struct cam_ed *device)
 4637 {
 4638         union ccb *new_ccb;
 4639         int s;
 4640 
 4641         s = splsoftcam();
 4642         if ((new_ccb = (union ccb *)ccb_freeq.slh_first) == NULL) {
 4643                 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
 4644                 if (new_ccb == NULL) {
 4645                         splx(s);
 4646                         return (NULL);
 4647                 }
 4648                 callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 4649                 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
 4650                                   xpt_links.sle);
 4651                 xpt_ccb_count++;
 4652         }
 4653         cam_ccbq_take_opening(&device->ccbq);
 4654         SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
 4655         splx(s);
 4656         return (new_ccb);
 4657 }
 4658 
 4659 static void
 4660 xpt_release_bus(struct cam_eb *bus)
 4661 {
 4662         int s;
 4663 
 4664         s = splcam();
 4665         if ((--bus->refcount == 0)
 4666          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
 4667                 TAILQ_REMOVE(&xpt_busses, bus, links);
 4668                 bus_generation++;
 4669                 splx(s);
 4670                 free(bus, M_DEVBUF);
 4671         } else
 4672                 splx(s);
 4673 }
 4674 
 4675 static struct cam_et *
 4676 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4677 {
 4678         struct cam_et *target;
 4679 
 4680         target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
 4681         if (target != NULL) {
 4682                 struct cam_et *cur_target;
 4683 
 4684                 TAILQ_INIT(&target->ed_entries);
 4685                 target->bus = bus;
 4686                 target->target_id = target_id;
 4687                 target->refcount = 1;
 4688                 target->generation = 0;
 4689                 timevalclear(&target->last_reset);
 4690                 /*
 4691                  * Hold a reference to our parent bus so it
 4692                  * will not go away before we do.
 4693                  */
 4694                 bus->refcount++;
 4695 
 4696                 /* Insertion sort into our bus's target list */
 4697                 cur_target = TAILQ_FIRST(&bus->et_entries);
 4698                 while (cur_target != NULL && cur_target->target_id < target_id)
 4699                         cur_target = TAILQ_NEXT(cur_target, links);
 4700 
 4701                 if (cur_target != NULL) {
 4702                         TAILQ_INSERT_BEFORE(cur_target, target, links);
 4703                 } else {
 4704                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4705                 }
 4706                 bus->generation++;
 4707         }
 4708         return (target);
 4709 }
 4710 
 4711 static void
 4712 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
 4713 {
 4714         int s;
 4715 
 4716         s = splcam();
 4717         if ((--target->refcount == 0)
 4718          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
 4719                 TAILQ_REMOVE(&bus->et_entries, target, links);
 4720                 bus->generation++;
 4721                 splx(s);
 4722                 free(target, M_DEVBUF);
 4723                 xpt_release_bus(bus);
 4724         } else
 4725                 splx(s);
 4726 }
 4727 
 4728 static struct cam_ed *
 4729 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4730 {
 4731         struct     cam_ed *device;
 4732         struct     cam_devq *devq;
 4733         cam_status status;
 4734 
 4735         /* Make space for us in the device queue on our bus */
 4736         devq = bus->sim->devq;
 4737         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 4738 
 4739         if (status != CAM_REQ_CMP) {
 4740                 device = NULL;
 4741         } else {
 4742                 device = (struct cam_ed *)malloc(sizeof(*device),
 4743                                                  M_DEVBUF, M_NOWAIT);
 4744         }
 4745 
 4746         if (device != NULL) {
 4747                 struct cam_ed *cur_device;
 4748 
 4749                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 4750                 device->alloc_ccb_entry.device = device;
 4751                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
 4752                 device->send_ccb_entry.device = device;
 4753                 device->target = target;
 4754                 device->lun_id = lun_id;
 4755                 /* Initialize our queues */
 4756                 if (camq_init(&device->drvq, 0) != 0) {
 4757                         free(device, M_DEVBUF);
 4758                         return (NULL);
 4759                 }
 4760                 if (cam_ccbq_init(&device->ccbq,
 4761                                   bus->sim->max_dev_openings) != 0) {
 4762                         camq_fini(&device->drvq);
 4763                         free(device, M_DEVBUF);
 4764                         return (NULL);
 4765                 }
 4766                 SLIST_INIT(&device->asyncs);
 4767                 SLIST_INIT(&device->periphs);
 4768                 device->generation = 0;
 4769                 device->owner = NULL;
 4770                 /*
 4771                  * Take the default quirk entry until we have inquiry
 4772                  * data and can determine a better quirk to use.
 4773                  */
 4774                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
 4775                 bzero(&device->inq_data, sizeof(device->inq_data));
 4776                 device->inq_flags = 0;
 4777                 device->queue_flags = 0;
 4778                 device->serial_num = NULL;
 4779                 device->serial_num_len = 0;
 4780                 device->qfrozen_cnt = 0;
 4781                 device->flags = CAM_DEV_UNCONFIGURED;
 4782                 device->tag_delay_count = 0;
 4783                 device->refcount = 1;
 4784                 callout_handle_init(&device->c_handle);
 4785 
 4786                 /*
 4787                  * Hold a reference to our parent target so it
 4788                  * will not go away before we do.
 4789                  */
 4790                 target->refcount++;
 4791 
 4792                 /*
 4793                  * XXX should be limited by number of CCBs this bus can
 4794                  * do.
 4795                  */
 4796                 xpt_max_ccbs += device->ccbq.devq_openings;
 4797                 /* Insertion sort into our target's device list */
 4798                 cur_device = TAILQ_FIRST(&target->ed_entries);
 4799                 while (cur_device != NULL && cur_device->lun_id < lun_id)
 4800                         cur_device = TAILQ_NEXT(cur_device, links);
 4801                 if (cur_device != NULL) {
 4802                         TAILQ_INSERT_BEFORE(cur_device, device, links);
 4803                 } else {
 4804                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 4805                 }
 4806                 target->generation++;
 4807         }
 4808         return (device);
 4809 }
 4810 
 4811 static void
 4812 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
 4813                    struct cam_ed *device)
 4814 {
 4815         int s;
 4816 
 4817         s = splcam();
 4818         if ((--device->refcount == 0)
 4819          && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
 4820                 struct cam_devq *devq;
 4821 
 4822                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 4823                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 4824                         panic("Removing device while still queued for ccbs");
 4825 
 4826                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 4827                                 untimeout(xpt_release_devq_timeout, device,
 4828                                           device->c_handle);
 4829 
 4830                 TAILQ_REMOVE(&target->ed_entries, device, links);
 4831                 target->generation++;
 4832                 xpt_max_ccbs -= device->ccbq.devq_openings;
 4833                 /* Release our slot in the devq */
 4834                 devq = bus->sim->devq;
 4835                 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 4836                 splx(s);
 4837                 camq_fini(&device->drvq);
 4838                 camq_fini(&device->ccbq.queue);
 4839                 free(device, M_DEVBUF);
 4840                 xpt_release_target(bus, target);
 4841         } else
 4842                 splx(s);
 4843 }
 4844 
 4845 static u_int32_t
 4846 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 4847 {
 4848         int     s;
 4849         int     diff;
 4850         int     result;
 4851         struct  cam_ed *dev;
 4852 
 4853         dev = path->device;
 4854         s = splsoftcam();
 4855 
 4856         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 4857         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 4858         if (result == CAM_REQ_CMP && (diff < 0)) {
 4859                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 4860         }
 4861         /* Adjust the global limit */
 4862         xpt_max_ccbs += diff;
 4863         splx(s);
 4864         return (result);
 4865 }
 4866 
 4867 static struct cam_eb *
 4868 xpt_find_bus(path_id_t path_id)
 4869 {
 4870         struct cam_eb *bus;
 4871 
 4872         for (bus = TAILQ_FIRST(&xpt_busses);
 4873              bus != NULL;
 4874              bus = TAILQ_NEXT(bus, links)) {
 4875                 if (bus->path_id == path_id) {
 4876                         bus->refcount++;
 4877                         break;
 4878                 }
 4879         }
 4880         return (bus);
 4881 }
 4882 
 4883 static struct cam_et *
 4884 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 4885 {
 4886         struct cam_et *target;
 4887 
 4888         for (target = TAILQ_FIRST(&bus->et_entries);
 4889              target != NULL;
 4890              target = TAILQ_NEXT(target, links)) {
 4891                 if (target->target_id == target_id) {
 4892                         target->refcount++;
 4893                         break;
 4894                 }
 4895         }
 4896         return (target);
 4897 }
 4898 
 4899 static struct cam_ed *
 4900 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 4901 {
 4902         struct cam_ed *device;
 4903 
 4904         for (device = TAILQ_FIRST(&target->ed_entries);
 4905              device != NULL;
 4906              device = TAILQ_NEXT(device, links)) {
 4907                 if (device->lun_id == lun_id) {
 4908                         device->refcount++;
 4909                         break;
 4910                 }
 4911         }
 4912         return (device);
 4913 }
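
/*
 * Note that each successful xpt_find_bus/xpt_find_target/xpt_find_device
 * lookup above bumps the object's refcount; callers must eventually drop
 * it via the matching xpt_release_* routine, as xpt_release_path does.
 */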
 4914 
 4915 typedef struct {
 4916         union   ccb *request_ccb;
 4917         struct  ccb_pathinq *cpi;
 4918         int     counter;
 4919 } xpt_scan_bus_info;
 4920 
 4921 static void
 4922 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
 4923 {
 4924         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 4925                   ("xpt_scan_bus\n"));
 4926         switch (request_ccb->ccb_h.func_code) {
 4927         case XPT_SCAN_BUS:
 4928         {
 4929                 xpt_scan_bus_info *scan_info;
 4930                 union   ccb *work_ccb;
 4931                 struct  cam_path *path;
 4932                 u_int i;
 4933                 u_int max_target;
 4934                 u_int initiator_id;
 4935 
 4936                 /* Find out the characteristics of the bus */
 4937                 work_ccb = xpt_alloc_ccb();
 4938                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
 4939                     request_ccb->ccb_h.pinfo.priority);
 4940                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 4941                 xpt_action(work_ccb);
 4942                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
 4943                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
 4944                         xpt_free_ccb(work_ccb);
 4945                         xpt_done(request_ccb);
 4946                         return;
 4947                 }
 4948 
 4949                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 4950                         /*
 4951                          * Can't scan the bus on an adapter that
 4952                          * cannot perform the initiator role.
 4953                          */
 4954                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 4955                         xpt_free_ccb(work_ccb);
 4956                         xpt_done(request_ccb);
 4957                         return;
 4958                 }
 4959 
 4960                 /* Save some state for use while we probe for devices */
 4961                 scan_info = (xpt_scan_bus_info *)
 4962                     malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
 4963                 scan_info->request_ccb = request_ccb;
 4964                 scan_info->cpi = &work_ccb->cpi;
 4965 
 4966                 /* Cache on our stack so we can work asynchronously */
 4967                 max_target = scan_info->cpi->max_target;
 4968                 initiator_id = scan_info->cpi->initiator_id;
 4969 
 4970 
 4971                 /*
 4972                  * We can scan all targets in parallel, or do it sequentially.
 4973                  */
 4974                 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
 4975                         max_target = 0;
 4976                         scan_info->counter = 0;
 4977                 } else {
 4978                         scan_info->counter = scan_info->cpi->max_target + 1;
 4979                         if (scan_info->cpi->initiator_id < scan_info->counter) {
 4980                                 scan_info->counter--;
 4981                         }
 4982                 }
 4983                 
 4984                 for (i = 0; i <= max_target; i++) {
 4985                         cam_status status;
 4986                         if (i == initiator_id)
 4987                                 continue;
 4988 
 4989                         status = xpt_create_path(&path, xpt_periph,
 4990                                                  request_ccb->ccb_h.path_id,
 4991                                                  i, 0);
 4992                         if (status != CAM_REQ_CMP) {
 4993                                 printf("xpt_scan_bus: xpt_create_path failed"
 4994                                        " with status %#x, bus scan halted\n",
 4995                                        status);
 4996                                 free(scan_info, M_TEMP);
 4997                                 request_ccb->ccb_h.status = status;
 4998                                 xpt_free_ccb(work_ccb);
 4999                                 xpt_done(request_ccb);
 5000                                 break;
 5001                         }
 5002                         work_ccb = xpt_alloc_ccb();
 5003                         xpt_setup_ccb(&work_ccb->ccb_h, path,
 5004                             request_ccb->ccb_h.pinfo.priority);
 5005                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5006                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5007                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5008                         work_ccb->crcn.flags = request_ccb->crcn.flags;
 5009                         xpt_action(work_ccb);
 5010                 }
 5011                 break;
 5012         }
 5013         case XPT_SCAN_LUN:
 5014         {
 5015                 cam_status status;
 5016                 struct cam_path *path;
 5017                 xpt_scan_bus_info *scan_info;
 5018                 path_id_t path_id;
 5019                 target_id_t target_id;
 5020                 lun_id_t lun_id;
 5021 
 5022                 /* Reuse the same CCB to query if a device was really found */
 5023                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
 5024                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
 5025                               request_ccb->ccb_h.pinfo.priority);
 5026                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 5027 
 5028                 path_id = request_ccb->ccb_h.path_id;
 5029                 target_id = request_ccb->ccb_h.target_id;
 5030                 lun_id = request_ccb->ccb_h.target_lun;
 5031                 xpt_action(request_ccb);
 5032 
 5033 #if 0
 5034                 printf("xpt_scan_bus: got back probe from %d:%d:%d\n",
 5035                         path_id, target_id, lun_id);
 5036 #endif
 5037 
 5038                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
 5039                         struct cam_ed *device;
 5040                         struct cam_et *target;
 5041                         int s, phl;
 5042 
 5043                         /*
 5044                          * If we already probed lun 0 successfully, or
 5045                          * we have additional configured luns on this
 5046                          * target that might have "gone away", go on to
 5047                          * the next lun.
 5048                          */
 5049                         target = request_ccb->ccb_h.path->target;
 5050                         /*
 5051                          * We may touch devices that we don't
 5052                          * hold references to, so ensure they
 5053                          * don't disappear out from under us.
 5054                          * The target above is referenced by the
 5055                          * path in the request ccb.
 5056                          */
 5057                         phl = 0;
 5058                         s = splcam();
 5059                         device = TAILQ_FIRST(&target->ed_entries);
 5060                         if (device != NULL) {
 5061                                 phl = device->quirk->quirks & CAM_QUIRK_HILUNS;
 5062                                 if (device->lun_id == 0)
 5063                                         device = TAILQ_NEXT(device, links);
 5064                         }
 5065                         splx(s);
 5066                         if ((lun_id != 0) || (device != NULL)) {
 5067                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
 5068                                         lun_id++;
 5069                         }
 5070                 } else {
 5071                         struct cam_ed *device;
 5072                         
 5073                         device = request_ccb->ccb_h.path->device;
 5074 
 5075                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
 5076                                 /* Try the next lun */
 5077                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
 5078                                     (device->quirk->quirks & CAM_QUIRK_HILUNS))
 5079                                         lun_id++;
 5080                         }
 5081                 }
 5082 
 5083                 /*
 5084                  * Free the current request path; we're done with it.
 5085                  */
 5086                 xpt_free_path(request_ccb->ccb_h.path);
 5087 
 5088                 /*
 5089                  * Check to see if we scan any further luns.
 5090                  * Check to see if we should scan any further luns.
 5091                 if (lun_id == request_ccb->ccb_h.target_lun
 5092                  || lun_id > scan_info->cpi->max_lun) {
 5093                         int done;
 5094 
 5095  hop_again:
 5096                         done = 0;
 5097                         if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
 5098                                 scan_info->counter++;
 5099                                 if (scan_info->counter == 
 5100                                     scan_info->cpi->initiator_id) {
 5101                                         scan_info->counter++;
 5102                                 }
 5103                                 if (scan_info->counter >=
 5104                                     scan_info->cpi->max_target+1) {
 5105                                         done = 1;
 5106                                 }
 5107                         } else {
 5108                                 scan_info->counter--;
 5109                                 if (scan_info->counter == 0) {
 5110                                         done = 1;
 5111                                 }
 5112                         }
 5113                         if (done) {
 5114                                 xpt_free_ccb(request_ccb);
 5115                                 xpt_free_ccb((union ccb *)scan_info->cpi);
 5116                                 request_ccb = scan_info->request_ccb;
 5117                                 free(scan_info, M_TEMP);
 5118                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
 5119                                 xpt_done(request_ccb);
 5120                                 break;
 5121                         }
 5122 
 5123                         if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
 5124                                 break;
 5125                         }
 5126                         status = xpt_create_path(&path, xpt_periph,
 5127                             scan_info->request_ccb->ccb_h.path_id,
 5128                             scan_info->counter, 0);
 5129                         if (status != CAM_REQ_CMP) {
 5130                                 printf("xpt_scan_bus: xpt_create_path failed"
 5131                                     " with status %#x, bus scan halted\n",
 5132                                     status);
 5133                                 xpt_free_ccb(request_ccb);
 5134                                 xpt_free_ccb((union ccb *)scan_info->cpi);
 5135                                 request_ccb = scan_info->request_ccb;
 5136                                 free(scan_info, M_TEMP);
 5137                                 request_ccb->ccb_h.status = status;
 5138                                 xpt_done(request_ccb);
 5139                                 break;
 5140                         }
 5141                         xpt_setup_ccb(&request_ccb->ccb_h, path,
 5142                             request_ccb->ccb_h.pinfo.priority);
 5143                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5144                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5145                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5146                         request_ccb->crcn.flags =
 5147                             scan_info->request_ccb->crcn.flags;
 5148                 } else {
 5149                         status = xpt_create_path(&path, xpt_periph,
 5150                             path_id, target_id, lun_id);
 5151                         if (status != CAM_REQ_CMP) {
 5152                                 printf("xpt_scan_bus: xpt_create_path failed "
 5153                                        "with status %#x, halting LUN scan\n",
 5154                                        status);
 5155                                 goto hop_again;
 5156                         }
 5157                         xpt_setup_ccb(&request_ccb->ccb_h, path,
 5158                               request_ccb->ccb_h.pinfo.priority);
 5159                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5160                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5161                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5162                         request_ccb->crcn.flags =
 5163                                 scan_info->request_ccb->crcn.flags;
 5164                 }
 5165                 xpt_action(request_ccb);
 5166                 break;
 5167         }
 5168         default:
 5169                 break;
 5170         }
 5171 }
 5172 
 5173 typedef enum {
 5174         PROBE_TUR,
 5175         PROBE_INQUIRY,
 5176         PROBE_FULL_INQUIRY,
 5177         PROBE_MODE_SENSE,
 5178         PROBE_SERIAL_NUM,
 5179         PROBE_TUR_FOR_NEGOTIATION
 5180 } probe_action;
 5181 
 5182 typedef enum {
 5183         PROBE_INQUIRY_CKSUM     = 0x01,
 5184         PROBE_SERIAL_CKSUM      = 0x02,
 5185         PROBE_NO_ANNOUNCE       = 0x04
 5186 } probe_flags;
 5187 
 5188 typedef struct {
 5189         TAILQ_HEAD(, ccb_hdr) request_ccbs;
 5190         probe_action    action;
 5191         union ccb       saved_ccb;
 5192         probe_flags     flags;
 5193         MD5_CTX         context;
 5194         u_int8_t        digest[16];
 5195 } probe_softc;
 5196 
 5197 static void
 5198 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
 5199              cam_flags flags, union ccb *request_ccb)
 5200 {
 5201         struct ccb_pathinq cpi;
 5202         cam_status status;
 5203         struct cam_path *new_path;
 5204         struct cam_periph *old_periph;
 5205         int s;
 5206         
 5207         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5208                   ("xpt_scan_lun\n"));
 5209         
 5210         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 5211         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5212         xpt_action((union ccb *)&cpi);
 5213 
 5214         if (cpi.ccb_h.status != CAM_REQ_CMP) {
 5215                 if (request_ccb != NULL) {
 5216                         request_ccb->ccb_h.status = cpi.ccb_h.status;
 5217                         xpt_done(request_ccb);
 5218                 }
 5219                 return;
 5220         }
 5221 
 5222         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5223                 /*
 5224                  * Can't scan the bus on an adapter that
 5225                  * cannot perform the initiator role.
 5226                  */
 5227                 if (request_ccb != NULL) {
 5228                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5229                         xpt_done(request_ccb);
 5230                 }
 5231                 return;
 5232         }
 5233 
 5234         if (request_ccb == NULL) {
 5235                 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
 5236                 if (request_ccb == NULL) {
 5237                         xpt_print_path(path);
 5238                         printf("xpt_scan_lun: can't allocate CCB, can't "
 5239                                "continue\n");
 5240                         return;
 5241                 }
 5242                 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
 5243                 if (new_path == NULL) {
 5244                         xpt_print_path(path);
 5245                         printf("xpt_scan_lun: can't allocate path, can't "
 5246                                "continue\n");
 5247                         free(request_ccb, M_TEMP);
 5248                         return;
 5249                 }
 5250                 status = xpt_compile_path(new_path, xpt_periph,
 5251                                           path->bus->path_id,
 5252                                           path->target->target_id,
 5253                                           path->device->lun_id);
 5254 
 5255                 if (status != CAM_REQ_CMP) {
 5256                         xpt_print_path(path);
 5257                         printf("xpt_scan_lun: can't compile path, can't "
 5258                                "continue\n");
 5259                         free(request_ccb, M_TEMP);
 5260                         free(new_path, M_TEMP);
 5261                         return;
 5262                 }
 5263                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
 5264                 request_ccb->ccb_h.cbfcnp = xptscandone;
 5265                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5266                 request_ccb->crcn.flags = flags;
 5267         }
 5268 
 5269         s = splsoftcam();
 5270         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
 5271                 probe_softc *softc;
 5272 
 5273                 softc = (probe_softc *)old_periph->softc;
 5274                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5275                                   periph_links.tqe);
 5276         } else {
 5277                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
 5278                                           probestart, "probe",
 5279                                           CAM_PERIPH_BIO,
 5280                                           request_ccb->ccb_h.path, NULL, 0,
 5281                                           request_ccb);
 5282 
 5283                 if (status != CAM_REQ_CMP) {
 5284                         xpt_print_path(path);
  5285                         printf("xpt_scan_lun: cam_periph_alloc returned an "
 5286                                "error, can't continue probe\n");
 5287                         request_ccb->ccb_h.status = status;
 5288                         xpt_done(request_ccb);
 5289                 }
 5290         }
 5291         splx(s);
 5292 }
 5293 
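       /*
        * Completion handler for scans initiated internally by
        * xpt_scan_lun(); releases the path and frees the storage
        * allocated there.
        */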
 5294 static void
 5295 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
 5296 {
 5297         xpt_release_path(done_ccb->ccb_h.path);
 5298         free(done_ccb->ccb_h.path, M_TEMP);
 5299         free(done_ccb, M_TEMP);
 5300 }
 5301 
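       /*
        * Probe periph constructor: allocate the softc, queue the initiating
        * request ccb, and schedule the first probe action once any bus
        * settle delay after the last bus reset has expired.
        */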
 5302 static cam_status
 5303 proberegister(struct cam_periph *periph, void *arg)
 5304 {
 5305         union ccb *request_ccb; /* CCB representing the probe request */
 5306         probe_softc *softc;
 5307 
 5308         request_ccb = (union ccb *)arg;
 5309         if (periph == NULL) {
 5310                 printf("proberegister: periph was NULL!!\n");
 5311                 return(CAM_REQ_CMP_ERR);
 5312         }
 5313 
 5314         if (request_ccb == NULL) {
 5315                 printf("proberegister: no probe CCB, "
 5316                        "can't register device\n");
 5317                 return(CAM_REQ_CMP_ERR);
 5318         }
 5319 
 5320         softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
 5321 
 5322         if (softc == NULL) {
 5323                 printf("proberegister: Unable to probe new device. "
 5324                        "Unable to allocate softc\n");                           
 5325                 return(CAM_REQ_CMP_ERR);
 5326         }
 5327         TAILQ_INIT(&softc->request_ccbs);
 5328         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5329                           periph_links.tqe);
 5330         softc->flags = 0;
 5331         periph->softc = softc;
 5332         cam_periph_acquire(periph);
 5333         /*
 5334          * Ensure we've waited at least a bus settle
 5335          * delay before attempting to probe the device.
 5336          * For HBAs that don't do bus resets, this won't make a difference.
 5337          */
 5338         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
 5339                                       SCSI_DELAY);
 5340         probeschedule(periph);
 5341         return(CAM_REQ_CMP);
 5342 }
 5343 
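       /*
        * Choose the first probe action for the request at the head of the
        * queue and schedule this periph to issue it.
        */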
 5344 static void
 5345 probeschedule(struct cam_periph *periph)
 5346 {
 5347         struct ccb_pathinq cpi;
 5348         union ccb *ccb;
 5349         probe_softc *softc;
 5350 
 5351         softc = (probe_softc *)periph->softc;
 5352         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 5353 
 5354         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
 5355         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5356         xpt_action((union ccb *)&cpi);
 5357 
 5358         /*
 5359          * If a device has gone away and another device, or the same one,
 5360          * is back in the same place, it should have a unit attention
 5361          * condition pending.  It will not report the unit attention in
 5362          * response to an inquiry, which may leave invalid transfer
 5363          * negotiations in effect.  The TUR will reveal the unit attention
 5364          * condition.  Only send the TUR for lun 0, since some devices 
 5365          * will get confused by commands other than inquiry to non-existent
  5366          * luns.  If you think a device has gone away, start your scan from
  5367          * lun 0.  This will ensure that any bogus transfer settings are
 5368          * invalidated.
 5369          *
 5370          * If we haven't seen the device before and the controller supports
 5371          * some kind of transfer negotiation, negotiate with the first
 5372          * sent command if no bus reset was performed at startup.  This
 5373          * ensures that the device is not confused by transfer negotiation
  5374          * settings left over from loader or BIOS actions.
 5375          */
 5376         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 5377          && (ccb->ccb_h.target_lun == 0)) {
 5378                 softc->action = PROBE_TUR;
 5379         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
 5380               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
 5381                 proberequestdefaultnegotiation(periph);
 5382                 softc->action = PROBE_INQUIRY;
 5383         } else {
 5384                 softc->action = PROBE_INQUIRY;
 5385         }
 5386 
 5387         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
 5388                 softc->flags |= PROBE_NO_ANNOUNCE;
 5389         else
 5390                 softc->flags &= ~PROBE_NO_ANNOUNCE;
 5391 
 5392         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
 5393 }
 5394 
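       /*
        * Build and dispatch the SCSI command for the current softc->action;
        * probedone() advances the state machine as each command completes.
        */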
 5395 static void
 5396 probestart(struct cam_periph *periph, union ccb *start_ccb)
 5397 {
 5398         /* Probe the device that our peripheral driver points to */
 5399         struct ccb_scsiio *csio;
 5400         probe_softc *softc;
 5401 
 5402         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
 5403 
 5404         softc = (probe_softc *)periph->softc;
 5405         csio = &start_ccb->csio;
 5406 
 5407         switch (softc->action) {
 5408         case PROBE_TUR:
 5409         case PROBE_TUR_FOR_NEGOTIATION:
 5410         {
 5411                 scsi_test_unit_ready(csio,
 5412                                      /*retries*/4,
 5413                                      probedone,
 5414                                      MSG_SIMPLE_Q_TAG,
 5415                                      SSD_FULL_SIZE,
 5416                                      /*timeout*/60000);
 5417                 break;
 5418         }
 5419         case PROBE_INQUIRY:
 5420         case PROBE_FULL_INQUIRY:
 5421         {
 5422                 u_int inquiry_len;
 5423                 struct scsi_inquiry_data *inq_buf;
 5424 
 5425                 inq_buf = &periph->path->device->inq_data;
 5426                 /*
 5427                  * If the device is currently configured, we calculate an
 5428                  * MD5 checksum of the inquiry data, and if the serial number
 5429                  * length is greater than 0, add the serial number data
 5430                  * into the checksum as well.  Once the inquiry and the
 5431                  * serial number check finish, we attempt to figure out
 5432                  * whether we still have the same device.
 5433                  */
 5434                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
 5435                         
 5436                         MD5Init(&softc->context);
 5437                         MD5Update(&softc->context, (unsigned char *)inq_buf,
 5438                                   sizeof(struct scsi_inquiry_data));
 5439                         softc->flags |= PROBE_INQUIRY_CKSUM;
 5440                         if (periph->path->device->serial_num_len > 0) {
 5441                                 MD5Update(&softc->context,
 5442                                           periph->path->device->serial_num,
 5443                                           periph->path->device->serial_num_len);
 5444                                 softc->flags |= PROBE_SERIAL_CKSUM;
 5445                         }
 5446                         MD5Final(softc->digest, &softc->context);
 5447                 } 
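                       /*
                        * The digest computed above is compared against a
                        * fresh checksum in probedone()'s PROBE_SERIAL_NUM
                        * case to decide whether this is still the same
                        * device.
                        */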
 5448 
 5449                 if (softc->action == PROBE_INQUIRY)
 5450                         inquiry_len = SHORT_INQUIRY_LENGTH;
 5451                 else
 5452                         inquiry_len = inq_buf->additional_length + 5;
 5453         
 5454                 scsi_inquiry(csio,
 5455                              /*retries*/4,
 5456                              probedone,
 5457                              MSG_SIMPLE_Q_TAG,
 5458                              (u_int8_t *)inq_buf,
 5459                              inquiry_len,
 5460                              /*evpd*/FALSE,
 5461                              /*page_code*/0,
 5462                              SSD_MIN_SIZE,
 5463                              /*timeout*/60 * 1000);
 5464                 break;
 5465         }
 5466         case PROBE_MODE_SENSE:
 5467         {
 5468                 void  *mode_buf;
 5469                 int    mode_buf_len;
 5470 
 5471                 mode_buf_len = sizeof(struct scsi_mode_header_6)
 5472                              + sizeof(struct scsi_mode_blk_desc)
 5473                              + sizeof(struct scsi_control_page);
 5474                 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
 5475                 if (mode_buf != NULL) {
 5476                         scsi_mode_sense(csio,
 5477                                         /*retries*/4,
 5478                                         probedone,
 5479                                         MSG_SIMPLE_Q_TAG,
 5480                                         /*dbd*/FALSE,
 5481                                         SMS_PAGE_CTRL_CURRENT,
 5482                                         SMS_CONTROL_MODE_PAGE,
 5483                                         mode_buf,
 5484                                         mode_buf_len,
 5485                                         SSD_FULL_SIZE,
 5486                                         /*timeout*/60000);
 5487                         break;
 5488                 }
 5489                 xpt_print_path(periph->path);
 5490                 printf("Unable to mode sense control page - malloc failure\n");
 5491                 softc->action = PROBE_SERIAL_NUM;
 5492                 /* FALLTHROUGH */
 5493         }
 5494         case PROBE_SERIAL_NUM:
 5495         {
 5496                 struct scsi_vpd_unit_serial_number *serial_buf;
 5497                 struct cam_ed* device;
 5498 
 5499                 serial_buf = NULL;
 5500                 device = periph->path->device;
 5501                 device->serial_num = NULL;
 5502                 device->serial_num_len = 0;
 5503 
 5504                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
 5505                         serial_buf = (struct scsi_vpd_unit_serial_number *)
 5506                                 malloc(sizeof(*serial_buf), M_TEMP,
 5507                                         M_NOWAIT | M_ZERO);
 5508 
 5509                 if (serial_buf != NULL) {
 5510                         scsi_inquiry(csio,
 5511                                      /*retries*/4,
 5512                                      probedone,
 5513                                      MSG_SIMPLE_Q_TAG,
 5514                                      (u_int8_t *)serial_buf,
 5515                                      sizeof(*serial_buf),
 5516                                      /*evpd*/TRUE,
 5517                                      SVPD_UNIT_SERIAL_NUMBER,
 5518                                      SSD_MIN_SIZE,
 5519                                      /*timeout*/60 * 1000);
 5520                         break;
 5521                 }
 5522                 /*
  5523                  * We'll have to do without; let our probedone
 5524                  * routine finish up for us.
 5525                  */
 5526                 start_ccb->csio.data_ptr = NULL;
 5527                 probedone(periph, start_ccb);
 5528                 return;
 5529         }
 5530         }
 5531         xpt_action(start_ccb);
 5532 }
 5533 
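       /*
        * Retrieve the user (default) transfer settings for the path and
        * immediately re-issue them as the current settings.  This is the
        * standard XPT_GET_TRAN_SETTINGS/XPT_SET_TRAN_SETTINGS pairing used
        * to start negotiation from a known baseline.
        */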
 5534 static void
 5535 proberequestdefaultnegotiation(struct cam_periph *periph)
 5536 {
 5537         struct ccb_trans_settings cts;
 5538 
 5539         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
 5540         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 5541         cts.flags = CCB_TRANS_USER_SETTINGS;
 5542         xpt_action((union ccb *)&cts);
 5543         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 5544         cts.flags &= ~CCB_TRANS_USER_SETTINGS;
 5545         cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
 5546         xpt_action((union ccb *)&cts);
 5547 }
 5548 
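       /*
        * Completion side of the probe state machine.  Each case either
        * reschedules the periph with the next action or, once probing is
        * finished, completes the queued request ccbs and tears the periph
        * down when no further requests are pending.
        */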
 5549 static void
 5550 probedone(struct cam_periph *periph, union ccb *done_ccb)
 5551 {
 5552         probe_softc *softc;
 5553         struct cam_path *path;
 5554         u_int32_t  priority;
 5555 
 5556         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
 5557 
 5558         softc = (probe_softc *)periph->softc;
 5559         path = done_ccb->ccb_h.path;
 5560         priority = done_ccb->ccb_h.pinfo.priority;
 5561 
 5562         switch (softc->action) {
 5563         case PROBE_TUR:
 5564         {
 5565                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 5566 
 5567                         if (cam_periph_error(done_ccb, 0,
 5568                                              SF_NO_PRINT, NULL) == ERESTART)
 5569                                 return;
 5570                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 5571                                 /* Don't wedge the queue */
 5572                                 xpt_release_devq(done_ccb->ccb_h.path,
 5573                                                  /*count*/1,
 5574                                                  /*run_queue*/TRUE);
 5575                 }
 5576                 softc->action = PROBE_INQUIRY;
 5577                 xpt_release_ccb(done_ccb);
 5578                 xpt_schedule(periph, priority);
 5579                 return;
 5580         }
 5581         case PROBE_INQUIRY:
 5582         case PROBE_FULL_INQUIRY:
 5583         {
 5584                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 5585                         struct scsi_inquiry_data *inq_buf;
 5586                         u_int8_t periph_qual;
 5587 
 5588                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
 5589                         inq_buf = &path->device->inq_data;
 5590 
 5591                         periph_qual = SID_QUAL(inq_buf);
 5592                         
 5593                         switch(periph_qual) {
 5594                         case SID_QUAL_LU_CONNECTED:
 5595                         {
 5596                                 u_int8_t alen;
 5597 
 5598                                 /*
 5599                                  * We conservatively request only
  5600                                  * SHORT_INQUIRY_LENGTH bytes of inquiry
 5601                                  * information during our first try
 5602                                  * at sending an INQUIRY. If the device
 5603                                  * has more information to give,
 5604                                  * perform a second request specifying
 5605                                  * the amount of information the device
 5606                                  * is willing to give.
 5607                                  */
 5608                                 alen = inq_buf->additional_length;
 5609                                 if (softc->action == PROBE_INQUIRY
 5610                                  && alen > (SHORT_INQUIRY_LENGTH - 5)) {
 5611                                         softc->action = PROBE_FULL_INQUIRY;
 5612                                         xpt_release_ccb(done_ccb);
 5613                                         xpt_schedule(periph, priority);
 5614                                         return;
 5615                                 }
 5616 
 5617                                 xpt_find_quirk(path->device);
 5618 
 5619                                 if ((inq_buf->flags & SID_CmdQue) != 0)
 5620                                         softc->action = PROBE_MODE_SENSE;
 5621                                 else
 5622                                         softc->action = PROBE_SERIAL_NUM;
 5623 
 5624                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
 5625 
 5626                                 xpt_release_ccb(done_ccb);
 5627                                 xpt_schedule(periph, priority);
 5628                                 return;
 5629                         }
 5630                         default:
 5631                                 break;
 5632                         }
 5633                 } else if (cam_periph_error(done_ccb, 0,
 5634                                             done_ccb->ccb_h.target_lun > 0
 5635                                             ? SF_RETRY_UA|SF_QUIET_IR
 5636                                             : SF_RETRY_UA,
 5637                                             &softc->saved_ccb) == ERESTART) {
 5638                         return;
 5639                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 5640                         /* Don't wedge the queue */
 5641                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 5642                                          /*run_queue*/TRUE);
 5643                 }
 5644                 /*
 5645                  * If we get to this point, we got an error status back
 5646                  * from the inquiry and the error status doesn't require
 5647                  * automatically retrying the command.  Therefore, the
 5648                  * inquiry failed.  If we had inquiry information before
 5649                  * for this device, but this latest inquiry command failed,
 5650                  * the device has probably gone away.  If this device isn't
 5651                  * already marked unconfigured, notify the peripheral
 5652                  * drivers that this device is no more.
 5653                  */
 5654                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 5655                         /* Send the async notification. */
 5656                         xpt_async(AC_LOST_DEVICE, path, NULL);
 5657 
 5658                 xpt_release_ccb(done_ccb);
 5659                 break;
 5660         }
 5661         case PROBE_MODE_SENSE:
 5662         {
 5663                 struct ccb_scsiio *csio;
 5664                 struct scsi_mode_header_6 *mode_hdr;
 5665 
 5666                 csio = &done_ccb->csio;
 5667                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
 5668                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 5669                         struct scsi_control_page *page;
 5670                         u_int8_t *offset;
 5671 
 5672                         offset = ((u_int8_t *)&mode_hdr[1])
 5673                             + mode_hdr->blk_desc_len;
 5674                         page = (struct scsi_control_page *)offset;
 5675                         path->device->queue_flags = page->queue_flags;
 5676                 } else if (cam_periph_error(done_ccb, 0,
 5677                                             SF_RETRY_UA|SF_NO_PRINT,
 5678                                             &softc->saved_ccb) == ERESTART) {
 5679                         return;
 5680                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 5681                         /* Don't wedge the queue */
 5682                         xpt_release_devq(done_ccb->ccb_h.path,
 5683                                          /*count*/1, /*run_queue*/TRUE);
 5684                 }
 5685                 xpt_release_ccb(done_ccb);
 5686                 free(mode_hdr, M_TEMP);
 5687                 softc->action = PROBE_SERIAL_NUM;
 5688                 xpt_schedule(periph, priority);
 5689                 return;
 5690         }
 5691         case PROBE_SERIAL_NUM:
 5692         {
 5693                 struct ccb_scsiio *csio;
 5694                 struct scsi_vpd_unit_serial_number *serial_buf;
 5695                 u_int32_t  priority;
 5696                 int changed;
 5697                 int have_serialnum;
 5698 
 5699                 changed = 1;
 5700                 have_serialnum = 0;
 5701                 csio = &done_ccb->csio;
 5702                 priority = done_ccb->ccb_h.pinfo.priority;
 5703                 serial_buf =
 5704                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
 5705 
 5706                 /* Clean up from previous instance of this device */
 5707                 if (path->device->serial_num != NULL) {
 5708                         free(path->device->serial_num, M_DEVBUF);
 5709                         path->device->serial_num = NULL;
 5710                         path->device->serial_num_len = 0;
 5711                 }
 5712 
 5713                 if (serial_buf == NULL) {
 5714                         /*
 5715                          * Don't process the command as it was never sent
 5716                          */
 5717                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
 5718                         && (serial_buf->length > 0)) {
 5719 
 5720                         have_serialnum = 1;
 5721                         path->device->serial_num =
 5722                                 (u_int8_t *)malloc((serial_buf->length + 1),
 5723                                                    M_DEVBUF, M_NOWAIT);
 5724                         if (path->device->serial_num != NULL) {
 5725                                 bcopy(serial_buf->serial_num,
 5726                                       path->device->serial_num,
 5727                                       serial_buf->length);
 5728                                 path->device->serial_num_len =
 5729                                     serial_buf->length;
 5730                                 path->device->serial_num[serial_buf->length]
 5731                                     = '\0';
 5732                         }
 5733                 } else if (cam_periph_error(done_ccb, 0,
 5734                                             SF_RETRY_UA|SF_NO_PRINT,
 5735                                             &softc->saved_ccb) == ERESTART) {
 5736                         return;
 5737                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 5738                         /* Don't wedge the queue */
 5739                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 5740                                          /*run_queue*/TRUE);
 5741                 }
 5742                 
 5743                 /*
 5744                  * Let's see if we have seen this device before.
 5745                  */
 5746                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
 5747                         MD5_CTX context;
 5748                         u_int8_t digest[16];
 5749 
 5750                         MD5Init(&context);
 5751                         
 5752                         MD5Update(&context,
 5753                                   (unsigned char *)&path->device->inq_data,
 5754                                   sizeof(struct scsi_inquiry_data));
 5755 
 5756                         if (have_serialnum)
 5757                                 MD5Update(&context, serial_buf->serial_num,
 5758                                           serial_buf->length);
 5759 
 5760                         MD5Final(digest, &context);
 5761                         if (bcmp(softc->digest, digest, 16) == 0)
 5762                                 changed = 0;
 5763 
 5764                         /*
 5765                          * XXX Do we need to do a TUR in order to ensure
 5766                          *     that the device really hasn't changed???
 5767                          */
 5768                         if ((changed != 0)
 5769                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
 5770                                 xpt_async(AC_LOST_DEVICE, path, NULL);
 5771                 }
 5772                 if (serial_buf != NULL)
 5773                         free(serial_buf, M_TEMP);
 5774 
 5775                 if (changed != 0) {
 5776                         /*
  5777                          * We now have all the information needed to
  5778                          * safely perform transfer negotiation.
  5779                          * Controllers don't perform
  5780                          * any negotiation or tagged queuing until
  5781                          * after the first XPT_SET_TRAN_SETTINGS ccb is
  5782                          * received.  So, on a new device, just retrieve
  5783                          * the user settings, and set them as the current
  5784                          * settings to set the device up.
 5785                          */
 5786                         proberequestdefaultnegotiation(periph);
 5787                         xpt_release_ccb(done_ccb);
 5788 
 5789                         /*
 5790                          * Perform a TUR to allow the controller to
 5791                          * perform any necessary transfer negotiation.
 5792                          */
 5793                         softc->action = PROBE_TUR_FOR_NEGOTIATION;
 5794                         xpt_schedule(periph, priority);
 5795                         return;
 5796                 }
 5797                 xpt_release_ccb(done_ccb);
 5798                 break;
 5799         }
 5800         case PROBE_TUR_FOR_NEGOTIATION:
 5801                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 5802                         /* Don't wedge the queue */
 5803                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 5804                                          /*run_queue*/TRUE);
 5805                 }
 5806 
 5807                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
 5808 
 5809                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
 5810                         /* Inform the XPT that a new device has been found */
 5811                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 5812                         xpt_action(done_ccb);
 5813 
 5814                         xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
 5815                 }
 5816                 xpt_release_ccb(done_ccb);
 5817                 break;
 5818         }
 5819         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 5820         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
 5821         done_ccb->ccb_h.status = CAM_REQ_CMP;
 5822         xpt_done(done_ccb);
 5823         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
 5824                 cam_periph_invalidate(periph);
 5825                 cam_periph_release(periph);
 5826         } else {
 5827                 probeschedule(periph);
 5828         }
 5829 }
 5830 
 5831 static void
 5832 probecleanup(struct cam_periph *periph)
 5833 {
 5834         free(periph->softc, M_TEMP);
 5835 }
 5836 
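       /*
        * Match the device's inquiry data against the static quirk table;
        * the table is expected to end in a wildcard entry, so a failed
        * match is a panic.
        */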
 5837 static void
 5838 xpt_find_quirk(struct cam_ed *device)
 5839 {
 5840         caddr_t match;
 5841 
 5842         match = cam_quirkmatch((caddr_t)&device->inq_data,
 5843                                (caddr_t)xpt_quirk_table,
 5844                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
 5845                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
 5846 
 5847         if (match == NULL)
 5848                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
 5849 
 5850         device->quirk = (struct xpt_quirk_entry *)match;
 5851 }
 5852 
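       /*
        * Apply an XPT_SET_TRAN_SETTINGS request: fill in any fields the
        * caller left invalid from the current settings, clamp the sync
        * rate, offset, and bus width against what both the device's inquiry
        * data and the HBA (queried via XPT_PATH_INQ) can do, and handle the
        * queue freeze/resize required when tagged queuing is switched on or
        * off.  A caller typically marks only the fields it wants changed,
        * roughly as follows (an illustrative sketch, not code taken from
        * this file):
        *
        *      cts.valid = CCB_TRANS_BUS_WIDTH_VALID;
        *      cts.bus_width = MSG_EXT_WDTR_BUS_16_BIT;
        *      xpt_set_transfer_settings(&cts, device, FALSE);
        *
        * With async_update == TRUE only the tag bookkeeping is performed
        * and the request is not passed down to the SIM.
        */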
 5853 static void
 5854 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
 5855                           int async_update)
 5856 {
 5857         struct  cam_sim *sim;
 5858         int     qfrozen;
 5859 
 5860         sim = cts->ccb_h.path->bus->sim;
 5861         if (async_update == FALSE) {
 5862                 struct  scsi_inquiry_data *inq_data;
 5863                 struct  ccb_pathinq cpi;
 5864                 struct  ccb_trans_settings cur_cts;
 5865 
 5866                 if (device == NULL) {
 5867                         cts->ccb_h.status = CAM_PATH_INVALID;
 5868                         xpt_done((union ccb *)cts);
 5869                         return;
 5870                 }
 5871 
 5872                 /*
 5873                  * Perform sanity checking against what the
 5874                  * controller and device can do.
 5875                  */
 5876                 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
 5877                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 5878                 xpt_action((union ccb *)&cpi);
 5879                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
 5880                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 5881                 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
 5882                 xpt_action((union ccb *)&cur_cts);
 5883                 inq_data = &device->inq_data;
 5884 
 5885                 /* Fill in any gaps in what the user gave us */
 5886                 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
 5887                         cts->sync_period = cur_cts.sync_period;
 5888                 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
 5889                         cts->sync_offset = cur_cts.sync_offset;
 5890                 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
 5891                         cts->bus_width = cur_cts.bus_width;
 5892                 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
 5893                         cts->flags &= ~CCB_TRANS_DISC_ENB;
 5894                         cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
 5895                 }
 5896                 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
 5897                         cts->flags &= ~CCB_TRANS_TAG_ENB;
 5898                         cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
 5899                 }
 5900 
 5901                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 5902                   && (inq_data->flags & SID_Sync) == 0)
 5903                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
 5904                  || (cts->sync_offset == 0)
 5905                  || (cts->sync_period == 0)) {
 5906                         /* Force async */
 5907                         cts->sync_period = 0;
 5908                         cts->sync_offset = 0;
 5909                 } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) {
 5910 
 5911                         if ((inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
 5912                          && cts->sync_period <= 0x9) {
 5913                                 /*
 5914                                  * Don't allow DT transmission rates if the
 5915                                  * device does not support it.
 5916                                  */
 5917                                 cts->sync_period = 0xa;
 5918                         }
 5919                         if ((inq_data->spi3data & SID_SPI_IUS) == 0
 5920                          && cts->sync_period <= 0x8) {
 5921                                 /*
 5922                                  * Don't allow PACE transmission rates
  5923                                  * if the device does not support packetized
 5924                                  * transfers.
 5925                                  */
 5926                                 cts->sync_period = 0x9;
 5927                         }
 5928                 }
 5929 
 5930                 switch (cts->bus_width) {
 5931                 case MSG_EXT_WDTR_BUS_32_BIT:
 5932                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 5933                           || (inq_data->flags & SID_WBus32) != 0)
 5934                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
 5935                                 break;
 5936                         /* Fall Through to 16-bit */
 5937                 case MSG_EXT_WDTR_BUS_16_BIT:
 5938                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 5939                           || (inq_data->flags & SID_WBus16) != 0)
 5940                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
 5941                                 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
 5942                                 break;
 5943                         }
 5944                         /* Fall Through to 8-bit */
 5945                 default: /* New bus width?? */
 5946                 case MSG_EXT_WDTR_BUS_8_BIT:
 5947                         /* All targets can do this */
 5948                         cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
 5949                         break;
 5950                 }
 5951 
 5952                 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
 5953                         /*
 5954                          * Can't tag queue without disconnection.
 5955                          */
 5956                         cts->flags &= ~CCB_TRANS_TAG_ENB;
 5957                         cts->valid |= CCB_TRANS_TQ_VALID;
 5958                 }
 5959 
 5960                 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
 5961                  || (inq_data->flags & SID_CmdQue) == 0
 5962                  || (device->queue_flags & SCP_QUEUE_DQUE) != 0
 5963                  || (device->quirk->mintags == 0)) {
 5964                         /*
  5965                          * Can't tag on hardware that doesn't support tags,
  5966                          * doesn't have them enabled, or has broken tag support.
 5967                          */
 5968                         cts->flags &= ~CCB_TRANS_TAG_ENB;
 5969                 }
 5970         }
 5971 
 5972         qfrozen = FALSE;
 5973         if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
 5974                 int device_tagenb;
 5975 
 5976                 /*
 5977                  * If we are transitioning from tags to no-tags or
 5978                  * vice-versa, we need to carefully freeze and restart
 5979                  * the queue so that we don't overlap tagged and non-tagged
 5980                  * commands.  We also temporarily stop tags if there is
 5981                  * a change in transfer negotiation settings to allow
 5982                  * "tag-less" negotiation.
 5983                  */
 5984                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 5985                  || (device->inq_flags & SID_CmdQue) != 0)
 5986                         device_tagenb = TRUE;
 5987                 else
 5988                         device_tagenb = FALSE;
 5989 
 5990                 if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
 5991                   && device_tagenb == FALSE)
 5992                  || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
 5993                   && device_tagenb == TRUE)) {
 5994 
 5995                         if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
 5996                                 /*
 5997                                  * Delay change to use tags until after a
 5998                                  * few commands have gone to this device so
 5999                                  * the controller has time to perform transfer
 6000                                  * negotiations without tagged messages getting
 6001                                  * in the way.
 6002                                  */
 6003                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
 6004                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
 6005                         } else {
 6006                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
 6007                                 qfrozen = TRUE;
 6008                                 device->inq_flags &= ~SID_CmdQue;
 6009                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
 6010                                                     sim->max_dev_openings);
 6011                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6012                                 device->tag_delay_count = 0;
 6013                         }
 6014                 }
 6015         }
 6016 
 6017         if (async_update == FALSE) {
 6018                 /*
 6019                  * If we are currently performing tagged transactions to
 6020                  * this device and want to change its negotiation parameters,
 6021                  * go non-tagged for a bit to give the controller a chance to
 6022                  * negotiate unhampered by tag messages.
 6023                  */
 6024                 if ((device->inq_flags & SID_CmdQue) != 0
 6025                  && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
 6026                                    CCB_TRANS_SYNC_OFFSET_VALID|
 6027                                    CCB_TRANS_BUS_WIDTH_VALID)) != 0)
 6028                         xpt_toggle_tags(cts->ccb_h.path);
 6029 
 6030                 (*(sim->sim_action))(sim, (union ccb *)cts);
 6031         }
 6032 
 6033         if (qfrozen) {
 6034                 struct ccb_relsim crs;
 6035 
 6036                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
 6037                               /*priority*/1);
 6038                 crs.ccb_h.func_code = XPT_REL_SIMQ;
 6039                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6040                 crs.openings
 6041                     = crs.release_timeout 
 6042                     = crs.qfrozen_cnt
 6043                     = 0;
 6044                 xpt_action((union ccb *)&crs);
 6045         }
 6046 }
 6047 
 6048 static void
 6049 xpt_toggle_tags(struct cam_path *path)
 6050 {
 6051         struct cam_ed *dev;
 6052 
 6053         /*
 6054          * Give controllers a chance to renegotiate
 6055          * before starting tag operations.  We
 6056          * "toggle" tagged queuing off then on
 6057          * which causes the tag enable command delay
 6058          * counter to come into effect.
 6059          */
 6060         dev = path->device;
 6061         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6062          || ((dev->inq_flags & SID_CmdQue) != 0
 6063           && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
 6064                 struct ccb_trans_settings cts;
 6065 
 6066                 xpt_setup_ccb(&cts.ccb_h, path, 1);
 6067                 cts.flags = 0;
 6068                 cts.valid = CCB_TRANS_TQ_VALID;
 6069                 xpt_set_transfer_settings(&cts, path->device,
 6070                                           /*async_update*/TRUE);
 6071                 cts.flags = CCB_TRANS_TAG_ENB;
 6072                 xpt_set_transfer_settings(&cts, path->device,
 6073                                           /*async_update*/TRUE);
 6074         }
 6075 }
 6076 
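       /*
        * Invoked once a device's tag_delay_count drains to zero: enable
        * tagged queuing, resize the device's ccb queue to the tagged
        * opening limit, and queue a release of the frozen devq that takes
        * effect once outstanding commands drain.
        */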
 6077 static void
 6078 xpt_start_tags(struct cam_path *path)
 6079 {
 6080         struct ccb_relsim crs;
 6081         struct cam_ed *device;
 6082         struct cam_sim *sim;
 6083         int    newopenings;
 6084 
 6085         device = path->device;
 6086         sim = path->bus->sim;
 6087         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6088         xpt_freeze_devq(path, /*count*/1);
 6089         device->inq_flags |= SID_CmdQue;
 6090         newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
 6091         xpt_dev_ccbq_resize(path, newopenings);
 6092         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
 6093         crs.ccb_h.func_code = XPT_REL_SIMQ;
 6094         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6095         crs.openings
 6096             = crs.release_timeout 
 6097             = crs.qfrozen_cnt
 6098             = 0;
 6099         xpt_action((union ccb *)&crs);
 6100 }
 6101 
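       /*
        * Bookkeeping for initial bus configuration: the number of busses
        * still being configured and the number that will receive an initial
        * bus reset, counted by xptconfigbuscountfunc() below.
        */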
 6102 static int busses_to_config;
 6103 static int busses_to_reset;
 6104 
 6105 static int
 6106 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
 6107 {
 6108         if (bus->path_id != CAM_XPT_PATH_ID) {
 6109                 struct cam_path path;
 6110                 struct ccb_pathinq cpi;
 6111                 int can_negotiate;
 6112 
 6113                 busses_to_config++;
 6114                 xpt_compile_path(&path, NULL, bus->path_id,
 6115                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 6116                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 6117                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 6118                 xpt_action((union ccb *)&cpi);
 6119                 can_negotiate = cpi.hba_inquiry;
 6120                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
 6121                 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
 6122                  && can_negotiate)
 6123                         busses_to_reset++;
 6124                 xpt_release_path(&path);
 6125         }
 6126 
 6127         return(1);
 6128 }
 6129 
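       /*
        * Begin configuration of a single bus: issue a path inquiry and,
        * where the HBA can negotiate and bus resets are permitted, an
        * initial bus reset; the ccb is then handed to xpt_finishconfig().
        */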
 6130 static int
 6131 xptconfigfunc(struct cam_eb *bus, void *arg)
 6132 {
 6133         struct  cam_path *path;
 6134         union   ccb *work_ccb;
 6135 
 6136         if (bus->path_id != CAM_XPT_PATH_ID) {
 6137                 cam_status status;
 6138                 int can_negotiate;
 6139 
 6140                 work_ccb = xpt_alloc_ccb();
 6141                 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
 6142                                               CAM_TARGET_WILDCARD,
 6143                                               CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
 6144                         printf("xptconfigfunc: xpt_create_path failed with "
 6145                                "status %#x for bus %d\n", status, bus->path_id);
 6146                         printf("xptconfigfunc: halting bus configuration\n");
 6147                         xpt_free_ccb(work_ccb);
 6148                         busses_to_config--;
 6149                         xpt_finishconfig(xpt_periph, NULL);
 6150                         return(0);
 6151                 }
 6152                 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
 6153                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 6154                 xpt_action(work_ccb);
 6155                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
 6156                         printf("xptconfigfunc: CPI failed on bus %d "
 6157                                "with status %d\n", bus->path_id,
 6158                                work_ccb->ccb_h.status);
 6159                         xpt_finishconfig(xpt_periph, work_ccb);
 6160                         return(1);
 6161                 }
 6162 
 6163                 can_negotiate = work_ccb->cpi.hba_inquiry;
 6164                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
 6165                 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
 6166                  && (can_negotiate != 0)) {
 6167                         xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
 6168                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
 6169                         work_ccb->ccb_h.cbfcnp = NULL;
 6170                         CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
 6171                                   ("Resetting Bus\n"));
 6172                         xpt_action(work_ccb);
 6173                         xpt_finishconfig(xpt_periph, work_ccb);
 6174                 } else {
 6175                         /* Act as though we performed a successful BUS RESET */
 6176                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
 6177                         xpt_finishconfig(xpt_periph, work_ccb);
 6178                 }
 6179         }
 6180 
 6181         return(1);
 6182 }
 6183 
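       /*
        * Run from the boot-time config intrhook once interrupts are
        * enabled: set up any compiled-in CAM debugging, count the busses
        * that need configuration, and start a reset/scan pass across each
        * of them.
        */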
 6184 static void
 6185 xpt_config(void *arg)
 6186 {
 6187         /* Now that interrupts are enabled, go find our devices */
 6188 
 6189 #ifdef CAMDEBUG
 6190         /* Setup debugging flags and path */
 6191 #ifdef CAM_DEBUG_FLAGS
 6192         cam_dflags = CAM_DEBUG_FLAGS;
 6193 #else /* !CAM_DEBUG_FLAGS */
 6194         cam_dflags = CAM_DEBUG_NONE;
 6195 #endif /* CAM_DEBUG_FLAGS */
 6196 #ifdef CAM_DEBUG_BUS
 6197         if (cam_dflags != CAM_DEBUG_NONE) {
 6198                 if (xpt_create_path(&cam_dpath, xpt_periph,
 6199                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 6200                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 6201                         printf("xpt_config: xpt_create_path() failed for debug"
 6202                                " target %d:%d:%d, debugging disabled\n",
 6203                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 6204                         cam_dflags = CAM_DEBUG_NONE;
 6205                 }
 6206         } else
 6207                 cam_dpath = NULL;
 6208 #else /* !CAM_DEBUG_BUS */
 6209         cam_dpath = NULL;
 6210 #endif /* CAM_DEBUG_BUS */
 6211 #endif /* CAMDEBUG */
 6212 
 6213         /*
 6214          * Scan all installed busses.
 6215          */
 6216         xpt_for_all_busses(xptconfigbuscountfunc, NULL);
 6217 
 6218         if (busses_to_config == 0) {
 6219                 /* Call manually because we don't have any busses */
 6220                 xpt_finishconfig(xpt_periph, NULL);
 6221         } else  {
 6222                 if (busses_to_reset > 0 && SCSI_DELAY >= 2000) {
 6223                         printf("Waiting %d seconds for SCSI "
 6224                                "devices to settle\n", SCSI_DELAY/1000);
 6225                 }
 6226                 xpt_for_all_busses(xptconfigfunc, NULL);
 6227         }
 6228 }
 6229 
 6230 /*
 6231  * If the given device only has one peripheral attached to it, and if that
 6232  * peripheral is the passthrough driver, announce it.  This ensures that the
 6233  * user sees some sort of announcement for every peripheral in their system.
 6234  */
 6235 static int
 6236 xptpassannouncefunc(struct cam_ed *device, void *arg)
 6237 {
 6238         struct cam_periph *periph;
 6239         int i;
 6240 
 6241         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 6242              periph = SLIST_NEXT(periph, periph_links), i++);
 6243 
 6244         periph = SLIST_FIRST(&device->periphs);
 6245         if ((i == 1)
 6246          && (strncmp(periph->periph_name, "pass", 4) == 0))
 6247                 xpt_announce_periph(periph, NULL);
 6248 
 6249         return(1);
 6250 }
 6251 
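       /*
        * Completion path for initial bus configuration.  A successfully
        * completed bus reset is converted into a bus scan; when the last
        * bus finishes, the peripheral drivers are initialized,
        * passthrough-only devices are announced, and the config intrhook is
        * released so the boot can continue.
        */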
 6252 static void
 6253 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
 6254 {
 6255         struct  periph_driver **p_drv;
 6256         int     i;
 6257 
 6258         if (done_ccb != NULL) {
 6259                 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 6260                           ("xpt_finishconfig\n"));
 6261                 switch(done_ccb->ccb_h.func_code) {
 6262                 case XPT_RESET_BUS:
 6263                         if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
 6264                                 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
 6265                                 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
 6266                                 xpt_action(done_ccb);
 6267                                 return;
 6268                         }
 6269                         /* FALLTHROUGH */
 6270                 case XPT_SCAN_BUS:
 6271                 default:
 6272                         xpt_free_path(done_ccb->ccb_h.path);
 6273                         busses_to_config--;
 6274                         break;
 6275                 }
 6276         }
 6277 
 6278         if (busses_to_config == 0) {
 6279                 /* Register all the peripheral drivers */
 6280                 /* XXX This will have to change when we have loadable modules */
 6281                 p_drv = (struct periph_driver **)periphdriver_set.ls_items;
 6282                 for (i = 0; p_drv[i] != NULL; i++) {
 6283                         (*p_drv[i]->init)();
 6284                 }
 6285 
 6286                 /*
 6287                  * Check for devices with no "standard" peripheral driver
 6288                  * attached.  For any devices like that, announce the
 6289                  * passthrough driver so the user will see something.
 6290                  */
 6291                 xpt_for_all_devices(xptpassannouncefunc, NULL);
 6292 
 6293                 /* Release our hook so that the boot can continue. */
 6294                 config_intrhook_disestablish(xpt_config_hook);
 6295                 free(xpt_config_hook, M_TEMP);
 6296                 xpt_config_hook = NULL;
 6297         }
 6298         if (done_ccb != NULL)
 6299                 xpt_free_ccb(done_ccb);
 6300 }
 6301 
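       /*
        * Action routine for the xpt's own virtual sim.  Only path inquiries
        * are meaningful here; all other function codes are invalid.
        */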
 6302 static void
 6303 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 6304 {
 6305         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 6306 
 6307         switch (work_ccb->ccb_h.func_code) {
 6308         /* Common cases first */
 6309         case XPT_PATH_INQ:              /* Path routing inquiry */
 6310         {
 6311                 struct ccb_pathinq *cpi;
 6312 
 6313                 cpi = &work_ccb->cpi;
 6314                 cpi->version_num = 1; /* XXX??? */
 6315                 cpi->hba_inquiry = 0;
 6316                 cpi->target_sprt = 0;
 6317                 cpi->hba_misc = 0;
 6318                 cpi->hba_eng_cnt = 0;
 6319                 cpi->max_target = 0;
 6320                 cpi->max_lun = 0;
 6321                 cpi->initiator_id = 0;
 6322                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 6323                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
 6324                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 6325                 cpi->unit_number = sim->unit_number;
 6326                 cpi->bus_id = sim->bus_id;
 6327                 cpi->base_transfer_speed = 0;
 6328                 cpi->ccb_h.status = CAM_REQ_CMP;
 6329                 xpt_done(work_ccb);
 6330                 break;
 6331         }
 6332         default:
 6333                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 6334                 xpt_done(work_ccb);
 6335                 break;
 6336         }
 6337 }
 6338 
 6339 /*
 6340  * The xpt as a "controller" has no interrupt sources, so polling
 6341  * is a no-op.
 6342  */
 6343 static void
 6344 xptpoll(struct cam_sim *sim)
 6345 {
 6346 }
 6347 
 6348 /*
 6349  * Should only be called by the machine interrupt dispatch routines,
 6350  * so put these prototypes here instead of in the header.
 6351  */
 6352 
 6353 static void
 6354 swi_camnet(void)
 6355 {
 6356         camisr(&cam_netq);
 6357 }
 6358 
 6359 static void
 6360 swi_cambio(void)
 6361 {
 6362         camisr(&cam_bioq);
 6363 }
 6364 
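       /*
        * Drain a completion queue at splcam.  For each ccb this updates the
        * high-power and device-queue accounting, restarts frozen or
        * tag-delayed queues as needed, and finally calls the owning
        * peripheral's completion routine.
        */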
 6365 static void
 6366 camisr(cam_isrq_t *queue)
 6367 {
 6368         int     s;
 6369         struct  ccb_hdr *ccb_h;
 6370 
 6371         s = splcam();
 6372         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
 6373                 int     runq;
 6374 
 6375                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
 6376                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 6377                 splx(s);
 6378 
 6379                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
 6380                           ("camisr\n"));
 6381 
 6382                 runq = FALSE;
 6383 
 6384                 if (ccb_h->flags & CAM_HIGH_POWER) {
 6385                         struct highpowerlist    *hphead;
 6386                         struct cam_ed           *device;
 6387                         union ccb               *send_ccb;
 6388 
 6389                         hphead = &highpowerq;
 6390 
 6391                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
 6392 
 6393                         /*
 6394                          * Increment the count since this command is done.
 6395                          */
 6396                         num_highpower++;
 6397 
 6398                         /* 
 6399                          * Any high powered commands queued up?
 6400                          */
 6401                         if (send_ccb != NULL) {
 6402                                 device = send_ccb->ccb_h.path->device;
 6403 
 6404                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
 6405 
 6406                                 xpt_release_devq(send_ccb->ccb_h.path,
 6407                                                  /*count*/1, /*runqueue*/TRUE);
 6408                         }
 6409                 }
 6410                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 6411                         struct cam_ed *dev;
 6412 
 6413                         dev = ccb_h->path->device;
 6414 
 6415                         s = splcam();
 6416                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 6417 
 6418                         ccb_h->path->bus->sim->devq->send_active--;
 6419                         ccb_h->path->bus->sim->devq->send_openings++;
 6420                         splx(s);
 6421                         
 6422                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 6423                          || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 6424                           && (dev->ccbq.dev_active == 0))) {
 6425                                 
 6426                                 xpt_release_devq(ccb_h->path, /*count*/1,
 6427                                                  /*run_queue*/TRUE);
 6428                         }
 6429 
 6430                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6431                          && (--dev->tag_delay_count == 0))
 6432                                 xpt_start_tags(ccb_h->path);
 6433 
 6434                         if ((dev->ccbq.queue.entries > 0)
 6435                          && (dev->qfrozen_cnt == 0)
 6436                          && (device_is_send_queued(dev) == 0)) {
 6437                                 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
 6438                                                               dev);
 6439                         }
 6440                 }
 6441 
 6442                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
 6443                         xpt_release_simq(ccb_h->path->bus->sim,
 6444                                          /*run_queue*/TRUE);
 6445                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
 6446                         runq = FALSE;
 6447                 } 
 6448 
 6449                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 6450                  && (ccb_h->status & CAM_DEV_QFRZN)) {
 6451                         xpt_release_devq(ccb_h->path, /*count*/1,
 6452                                          /*run_queue*/TRUE);
 6453                         ccb_h->status &= ~CAM_DEV_QFRZN;
 6454                 } else if (runq) {
 6455                         xpt_run_dev_sendq(ccb_h->path->bus);
 6456                 }
 6457 
 6458                 /* Call the peripheral driver's callback */
 6459                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 6460 
  6461                 /* Raise IPL for the while-loop test */
 6462                 s = splcam();
 6463         }
 6464         splx(s);
 6465 }
