FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c


    1 /*-
    2  * Implementation of the Common Access Method Transport (XPT) layer.
    3  *
    4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
    5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/systm.h>
   36 #include <sys/types.h>
   37 #include <sys/malloc.h>
   38 #include <sys/kernel.h>
   39 #include <sys/time.h>
   40 #include <sys/conf.h>
   41 #include <sys/fcntl.h>
   42 #include <sys/md5.h>
   43 #include <sys/interrupt.h>
   44 #include <sys/sbuf.h>
   45 #include <sys/taskqueue.h>
   46 
   47 #include <sys/lock.h>
   48 #include <sys/mutex.h>
   49 #include <sys/sysctl.h>
   50 #include <sys/kthread.h>
   51 
   52 #ifdef PC98
   53 #include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
   54 #endif
   55 
   56 #include <cam/cam.h>
   57 #include <cam/cam_ccb.h>
   58 #include <cam/cam_periph.h>
   59 #include <cam/cam_sim.h>
   60 #include <cam/cam_xpt.h>
   61 #include <cam/cam_xpt_sim.h>
   62 #include <cam/cam_xpt_periph.h>
   63 #include <cam/cam_debug.h>
   64 
   65 #include <cam/scsi/scsi_all.h>
   66 #include <cam/scsi/scsi_message.h>
   67 #include <cam/scsi/scsi_pass.h>
   68 #include <machine/stdarg.h>     /* for xpt_print below */
   69 #include "opt_cam.h"
   70 
    71 /* Data structures internal to the xpt layer */
   72 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
   73 
    74 /* Object for deferring XPT actions to a taskqueue */
   75 struct xpt_task {
   76         struct task     task;
   77         void            *data1;
   78         uintptr_t       data2;
   79 };
   80 
   81 /*
   82  * Definition of an async handler callback block.  These are used to add
   83  * SIMs and peripherals to the async callback lists.
   84  */
   85 struct async_node {
   86         SLIST_ENTRY(async_node) links;
   87         u_int32_t       event_enable;   /* Async Event enables */
   88         void            (*callback)(void *arg, u_int32_t code,
   89                                     struct cam_path *path, void *args);
   90         void            *callback_arg;
   91 };
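       /*
        * A minimal (disabled) sketch of an async callback matching the
        * signature above; "example_async" is hypothetical and not part of
        * this file.  Consumers register such handlers with an
        * XPT_SASYNC_CB CCB, naming the events of interest in event_enable.
        */
       #if 0
       static void
       example_async(void *callback_arg, u_int32_t code,
                     struct cam_path *path, void *arg)
       {
               switch (code) {
               case AC_LOST_DEVICE:
                       /* Release any state held for the departed device. */
                       break;
               case AC_BUS_RESET:
                       /* Transfer settings may need to be renegotiated. */
                       break;
               default:
                       break;
               }
       }
       #endif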
   92 
   93 SLIST_HEAD(async_list, async_node);
   94 SLIST_HEAD(periph_list, cam_periph);
   95 
   96 /*
   97  * This is the maximum number of high powered commands (e.g. start unit)
   98  * that can be outstanding at a particular time.
   99  */
  100 #ifndef CAM_MAX_HIGHPOWER
  101 #define CAM_MAX_HIGHPOWER  4
  102 #endif
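       /*
        * The default of 4 can be overridden at kernel build time; e.g. in
        * a kernel configuration file (the value here is illustrative):
        *
        *      options CAM_MAX_HIGHPOWER=8
        */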
  103 
  104 /*
  105  * Structure for queueing a device in a run queue.
  106  * There is one run queue for allocating new ccbs,
  107  * and another for sending ccbs to the controller.
  108  */
  109 struct cam_ed_qinfo {
  110         cam_pinfo pinfo;
  111         struct    cam_ed *device;
  112 };
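       /*
        * (The allocation queue is drained by xpt_run_dev_allocq() and the
        * send queue by xpt_run_dev_sendq(); the xpt_schedule_dev_*()
        * helpers below place devices on them.)
        */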
  113 
  114 /*
  115  * The CAM EDT (Existing Device Table) contains the device information for
  116  * all devices for all busses in the system.  The table contains a
  117  * cam_ed structure for each device on the bus.
  118  */
  119 struct cam_ed {
  120         TAILQ_ENTRY(cam_ed) links;
  121         struct  cam_ed_qinfo alloc_ccb_entry;
  122         struct  cam_ed_qinfo send_ccb_entry;
  123         struct  cam_et   *target;
  124         struct  cam_sim  *sim;
  125         lun_id_t         lun_id;
  126         struct  camq drvq;              /*
  127                                          * Queue of type drivers wanting to do
  128                                          * work on this device.
  129                                          */
  130         struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
  131         struct  async_list asyncs;      /* Async callback info for this B/T/L */
  132         struct  periph_list periphs;    /* All attached devices */
  133         u_int   generation;             /* Generation number */
  134         struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
  135         struct  xpt_quirk_entry *quirk; /* Oddities about this device */
  136                                         /* Storage for the inquiry data */
  137         cam_proto        protocol;
  138         u_int            protocol_version;
  139         cam_xport        transport;
  140         u_int            transport_version;
  141         struct           scsi_inquiry_data inq_data;
  142         u_int8_t         inq_flags;     /*
  143                                          * Current settings for inquiry flags.
  144                                          * This allows us to override settings
  145                                          * like disconnection and tagged
  146                                          * queuing for a device.
  147                                          */
  148         u_int8_t         queue_flags;   /* Queue flags from the control page */
  149         u_int8_t         serial_num_len;
  150         u_int8_t        *serial_num;
  151         u_int32_t        qfrozen_cnt;
  152         u_int32_t        flags;
  153 #define CAM_DEV_UNCONFIGURED            0x01
  154 #define CAM_DEV_REL_TIMEOUT_PENDING     0x02
  155 #define CAM_DEV_REL_ON_COMPLETE         0x04
  156 #define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
  157 #define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
  158 #define CAM_DEV_TAG_AFTER_COUNT         0x20
  159 #define CAM_DEV_INQUIRY_DATA_VALID      0x40
  160 #define CAM_DEV_IN_DV                   0x80
  161 #define CAM_DEV_DV_HIT_BOTTOM           0x100
  162         u_int32_t        tag_delay_count;
  163 #define CAM_TAG_DELAY_COUNT             5
  164         u_int32_t        tag_saved_openings;
  165         u_int32_t        refcount;
  166         struct callout   callout;
  167 };
  168 
  169 /*
  170  * Each target is represented by an ET (Existing Target).  These
  171  * entries are created when a target is successfully probed with an
  172  * identify, and removed when a device fails to respond after a number
  173  * of retries, or a bus rescan finds the device missing.
  174  */
  175 struct cam_et {
  176         TAILQ_HEAD(, cam_ed) ed_entries;
  177         TAILQ_ENTRY(cam_et) links;
  178         struct  cam_eb  *bus;
  179         target_id_t     target_id;
  180         u_int32_t       refcount;
  181         u_int           generation;
  182         struct          timeval last_reset;
  183 };
  184 
  185 /*
  186  * Each bus is represented by an EB (Existing Bus).  These entries
  187  * are created by calls to xpt_bus_register and deleted by calls to
  188  * xpt_bus_deregister.
  189  */
  190 struct cam_eb {
  191         TAILQ_HEAD(, cam_et) et_entries;
  192         TAILQ_ENTRY(cam_eb)  links;
  193         path_id_t            path_id;
  194         struct cam_sim       *sim;
  195         struct timeval       last_reset;
  196         u_int32_t            flags;
  197 #define CAM_EB_RUNQ_SCHEDULED   0x01
  198         u_int32_t            refcount;
  199         u_int                generation;
  200         device_t             parent_dev;
  201 };
  202 
  203 struct cam_path {
  204         struct cam_periph *periph;
  205         struct cam_eb     *bus;
  206         struct cam_et     *target;
  207         struct cam_ed     *device;
  208 };
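       /*
        * Taken together: each cam_eb (bus) holds a list of cam_et entries
        * (targets), and each cam_et holds a list of cam_ed entries
        * (devices).  A cam_path simply names one bus/target/device tuple
        * in that tree, along with the peripheral currently using it.
        */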
  209 
  210 struct xpt_quirk_entry {
  211         struct scsi_inquiry_pattern inq_pat;
  212         u_int8_t quirks;
  213 #define CAM_QUIRK_NOLUNS        0x01
  214 #define CAM_QUIRK_NOSERIAL      0x02
  215 #define CAM_QUIRK_HILUNS        0x04
  216 #define CAM_QUIRK_NOHILUNS      0x08
  217         u_int mintags;
  218         u_int maxtags;
  219 };
  220 
  221 static int cam_srch_hi = 0;
  222 TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
  223 static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
  224 SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
  225     sysctl_cam_search_luns, "I",
  226     "allow search above LUN 7 for SCSI3 and greater devices");
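       /*
        * The knob above is both a loader tunable and a read-write sysctl;
        * for example (values illustrative):
        *
        *      kern.cam.cam_srch_hi=1          # /boot/loader.conf
        *      sysctl kern.cam.cam_srch_hi=1   # at runtime
        */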
  227 
  228 #define CAM_SCSI2_MAXLUN        8
  229 /*
   230  * If we're not quirked to limit searching to the first 8 luns,
   231  * and we are either quirked to search above lun 7,
   232  * or we're > SCSI-2 and we've enabled hilun searching,
   233  * or we're > SCSI-2 and the last lun was a success,
   234  * we can look for luns above lun 7.
  235  */
  236 #define CAN_SRCH_HI_SPARSE(dv)                          \
  237   (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  238   && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  239   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))
  240 
  241 #define CAN_SRCH_HI_DENSE(dv)                           \
  242   (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  243   && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  244   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
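       /*
        * Per the comment above, the DENSE form is used while consecutive
        * luns keep responding, so probing can continue past lun 7 without
        * the sysctl override; the SPARSE form, used when the previous lun
        * did not respond, additionally requires kern.cam.cam_srch_hi.
        */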
  245 
  246 typedef enum {
  247         XPT_FLAG_OPEN           = 0x01
  248 } xpt_flags;
  249 
  250 struct xpt_softc {
  251         xpt_flags               flags;
  252         u_int32_t               xpt_generation;
  253 
  254         /* number of high powered commands that can go through right now */
  255         STAILQ_HEAD(highpowerlist, ccb_hdr)     highpowerq;
  256         int                     num_highpower;
  257 
  258         /* queue for handling async rescan requests. */
  259         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
  260 
  261         /* Registered busses */
  262         TAILQ_HEAD(,cam_eb)     xpt_busses;
  263         u_int                   bus_generation;
  264 
  265         struct intr_config_hook *xpt_config_hook;
  266 
  267         struct mtx              xpt_topo_lock;
  268         struct mtx              xpt_lock;
  269 };
  270 
  271 static const char quantum[] = "QUANTUM";
  272 static const char sony[] = "SONY";
  273 static const char west_digital[] = "WDIGTL";
  274 static const char samsung[] = "SAMSUNG";
  275 static const char seagate[] = "SEAGATE";
  276 static const char microp[] = "MICROP";
  277 
  278 static struct xpt_quirk_entry xpt_quirk_table[] =
  279 {
  280         {
  281                 /* Reports QUEUE FULL for temporary resource shortages */
  282                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
  283                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  284         },
  285         {
  286                 /* Reports QUEUE FULL for temporary resource shortages */
  287                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
  288                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  289         },
  290         {
  291                 /* Reports QUEUE FULL for temporary resource shortages */
  292                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
  293                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  294         },
  295         {
  296                 /* Broken tagged queuing drive */
  297                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
   298                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  299         },
  300         {
  301                 /* Broken tagged queuing drive */
  302                 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
   303                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  304         },
  305         {
  306                 /* Broken tagged queuing drive */
  307                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
   308                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  309         },
  310         {
  311                 /*
  312                  * Unfortunately, the Quantum Atlas III has the same
  313                  * problem as the Atlas II drives above.
  314                  * Reported by: "Johan Granlund" <johan@granlund.nu>
  315                  *
  316                  * For future reference, the drive with the problem was:
  317                  * QUANTUM QM39100TD-SW N1B0
  318                  *
  319                  * It's possible that Quantum will fix the problem in later
  320                  * firmware revisions.  If that happens, the quirk entry
  321                  * will need to be made specific to the firmware revisions
  322                  * with the problem.
  323                  *
  324                  */
  325                 /* Reports QUEUE FULL for temporary resource shortages */
  326                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
  327                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  328         },
  329         {
  330                 /*
  331                  * 18 Gig Atlas III, same problem as the 9G version.
  332                  * Reported by: Andre Albsmeier
  333                  *              <andre.albsmeier@mchp.siemens.de>
  334                  *
  335                  * For future reference, the drive with the problem was:
  336                  * QUANTUM QM318000TD-S N491
  337                  */
  338                 /* Reports QUEUE FULL for temporary resource shortages */
  339                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
  340                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
  341         },
  342         {
  343                 /*
  344                  * Broken tagged queuing drive
  345                  * Reported by: Bret Ford <bford@uop.cs.uop.edu>
  346                  *         and: Martin Renters <martin@tdc.on.ca>
  347                  */
  348                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
   349                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  350         },
  351                 /*
  352                  * The Seagate Medalist Pro drives have very poor write
  353                  * performance with anything more than 2 tags.
  354                  *
  355                  * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
  356                  * Drive:  <SEAGATE ST36530N 1444>
  357                  *
  358                  * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
  359                  * Drive:  <SEAGATE ST34520W 1281>
  360                  *
  361                  * No one has actually reported that the 9G version
  362                  * (ST39140*) of the Medalist Pro has the same problem, but
  363                  * we're assuming that it does because the 4G and 6.5G
  364                  * versions of the drive are broken.
  365                  */
  366         {
  367                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
  368                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  369         },
  370         {
  371                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
  372                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  373         },
  374         {
  375                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
  376                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
  377         },
  378         {
  379                 /*
  380                  * Slow when tagged queueing is enabled.  Write performance
  381                  * steadily drops off with more and more concurrent
  382                  * transactions.  Best sequential write performance with
  383                  * tagged queueing turned off and write caching turned on.
  384                  *
  385                  * PR:  kern/10398
  386                  * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
  387                  * Drive:  DCAS-34330 w/ "S65A" firmware.
  388                  *
  389                  * The drive with the problem had the "S65A" firmware
  390                  * revision, and has also been reported (by Stephen J.
  391                  * Roznowski <sjr@home.net>) for a drive with the "S61A"
  392                  * firmware revision.
  393                  *
  394                  * Although no one has reported problems with the 2 gig
  395                  * version of the DCAS drive, the assumption is that it
  396                  * has the same problems as the 4 gig version.  Therefore
   397                  * this quirk entry disables tagged queueing for all
  398                  * DCAS drives.
  399                  */
  400                 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
   401                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  402         },
  403         {
  404                 /* Broken tagged queuing drive */
  405                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
   406                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  407         },
  408         {
  409                 /* Broken tagged queuing drive */
  410                 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
   411                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  412         },
  413         {
  414                 /* This does not support other than LUN 0 */
  415                 { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
  416                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
  417         },
  418         {
  419                 /*
  420                  * Broken tagged queuing drive.
  421                  * Submitted by:
  422                  * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
  423                  * in PR kern/9535
  424                  */
  425                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
   426                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  427         },
  428         {
  429                 /*
  430                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
  431                  * 8MB/sec.)
  432                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
  433                  * Best performance with these drives is achieved with
  434                  * tagged queueing turned off, and write caching turned on.
  435                  */
  436                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
   437                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  438         },
  439         {
  440                 /*
  441                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
  442                  * 8MB/sec.)
  443                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
  444                  * Best performance with these drives is achieved with
  445                  * tagged queueing turned off, and write caching turned on.
  446                  */
  447                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
   448                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
  449         },
  450         {
  451                 /*
  452                  * Doesn't handle queue full condition correctly,
  453                  * so we need to limit maxtags to what the device
  454                  * can handle instead of determining this automatically.
  455                  */
  456                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
  457                 /*quirks*/0, /*mintags*/2, /*maxtags*/32
  458         },
  459         {
  460                 /* Really only one LUN */
  461                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
   462                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  463         },
  464         {
  465                 /* I can't believe we need a quirk for DPT volumes. */
  466                 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
  467                 CAM_QUIRK_NOLUNS,
  468                 /*mintags*/0, /*maxtags*/255
  469         },
  470         {
  471                 /*
  472                  * Many Sony CDROM drives don't like multi-LUN probing.
  473                  */
  474                 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
   475                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  476         },
  477         {
  478                 /*
  479                  * This drive doesn't like multiple LUN probing.
  480                  * Submitted by:  Parag Patel <parag@cgt.com>
  481                  */
  482                 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
   483                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  484         },
  485         {
  486                 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
   487                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  488         },
  489         {
  490                 /*
   491                  * The 8200 doesn't like multi-lun probing, and probably
   492                  * doesn't like serial number requests either.
  493                  */
  494                 {
  495                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
  496                         "EXB-8200*", "*"
  497                 },
   498                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  499         },
  500         {
  501                 /*
  502                  * Let's try the same as above, but for a drive that says
  503                  * it's an IPL-6860 but is actually an EXB 8200.
  504                  */
  505                 {
  506                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
  507                         "IPL-6860*", "*"
  508                 },
   509                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  510         },
  511         {
  512                 /*
  513                  * These Hitachi drives don't like multi-lun probing.
  514                  * The PR submitter has a DK319H, but says that the Linux
  515                  * kernel has a similar work-around for the DK312 and DK314,
  516                  * so all DK31* drives are quirked here.
  517                  * PR:            misc/18793
  518                  * Submitted by:  Paul Haddad <paul@pth.com>
  519                  */
  520                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
  521                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
  522         },
  523         {
  524                 /*
   525                  * The Hitachi CJ series with J8A8 firmware apparently has
  526                  * problems with tagged commands.
  527                  * PR: 23536
  528                  * Reported by: amagai@nue.org
  529                  */
  530                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
   531                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  532         },
  533         {
  534                 /*
  535                  * These are the large storage arrays.
  536                  * Submitted by:  William Carrel <william.carrel@infospace.com>
  537                  */
  538                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
  539                 CAM_QUIRK_HILUNS, 2, 1024
  540         },
  541         {
  542                 /*
  543                  * This old revision of the TDC3600 is also SCSI-1, and
  544                  * hangs upon serial number probing.
  545                  */
  546                 {
  547                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
  548                         " TDC 3600", "U07:"
  549                 },
   550                 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
  551         },
  552         {
  553                 /*
   554                  * Would respond to all LUNs if asked for.
  555                  */
  556                 {
  557                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
  558                         "CP150", "*"
  559                 },
   560                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  561         },
  562         {
  563                 /*
   564                  * Would respond to all LUNs if asked for.
  565                  */
  566                 {
  567                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
  568                         "96X2*", "*"
  569                 },
   570                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  571         },
  572         {
  573                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
  574                 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
   575                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  576         },
  577         {
  578                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
  579                 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
   580                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  581         },
  582         {
  583                 /* TeraSolutions special settings for TRC-22 RAID */
  584                 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
  585                   /*quirks*/0, /*mintags*/55, /*maxtags*/255
  586         },
  587         {
  588                 /* Veritas Storage Appliance */
  589                 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
  590                   CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
  591         },
  592         {
  593                 /*
  594                  * Would respond to all LUNs.  Device type and removable
  595                  * flag are jumper-selectable.
  596                  */
  597                 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
  598                   "Tahiti 1", "*"
  599                 },
   600                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  601         },
  602         {
  603                 /* EasyRAID E5A aka. areca ARC-6010 */
  604                 { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
  605                   CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
  606         },
  607         {
  608                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
   609                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
  610         },
  611         {
  612                 /* Default tagged queuing parameters for all devices */
  613                 {
  614                   T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
  615                   /*vendor*/"*", /*product*/"*", /*revision*/"*"
  616                 },
  617                 /*quirks*/0, /*mintags*/2, /*maxtags*/255
  618         },
  619 };
  620 
  621 static const int xpt_quirk_table_size =
  622         sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
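       /*
        * A sketch of how the table above is consulted (the actual lookup
        * lives in xpt_find_quirk()): a device's inquiry data is compared
        * against each inq_pat in order, and the first matching entry
        * supplies the quirks and tag limits.  The final catch-all entry
        * guarantees every device receives the default tagged queuing
        * parameters.
        */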
  623 
  624 typedef enum {
  625         DM_RET_COPY             = 0x01,
  626         DM_RET_FLAG_MASK        = 0x0f,
  627         DM_RET_NONE             = 0x00,
  628         DM_RET_STOP             = 0x10,
  629         DM_RET_DESCEND          = 0x20,
  630         DM_RET_ERROR            = 0x30,
  631         DM_RET_ACTION_MASK      = 0xf0
  632 } dev_match_ret;
  633 
  634 typedef enum {
  635         XPT_DEPTH_BUS,
  636         XPT_DEPTH_TARGET,
  637         XPT_DEPTH_DEVICE,
  638         XPT_DEPTH_PERIPH
  639 } xpt_traverse_depth;
  640 
  641 struct xpt_traverse_config {
  642         xpt_traverse_depth      depth;
  643         void                    *tr_func;
  644         void                    *tr_arg;
  645 };
  646 
  647 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
  648 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
  649 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
  650 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
  651 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
  652 
  653 /* Transport layer configuration information */
  654 static struct xpt_softc xsoftc;
  655 
  656 /* Queues for our software interrupt handler */
  657 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
  658 typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
  659 static cam_simq_t cam_simq;
  660 static struct mtx cam_simq_lock;
  661 
  662 /* Pointers to software interrupt handlers */
  663 static void *cambio_ih;
  664 
  665 struct cam_periph *xpt_periph;
  666 
  667 static periph_init_t xpt_periph_init;
  668 
  669 static periph_init_t probe_periph_init;
  670 
  671 static struct periph_driver xpt_driver =
  672 {
  673         xpt_periph_init, "xpt",
  674         TAILQ_HEAD_INITIALIZER(xpt_driver.units)
  675 };
  676 
  677 static struct periph_driver probe_driver =
  678 {
  679         probe_periph_init, "probe",
  680         TAILQ_HEAD_INITIALIZER(probe_driver.units)
  681 };
  682 
  683 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
  684 PERIPHDRIVER_DECLARE(probe, probe_driver);
  685 
  686 
  687 static d_open_t xptopen;
  688 static d_close_t xptclose;
  689 static d_ioctl_t xptioctl;
  690 
  691 static struct cdevsw xpt_cdevsw = {
  692         .d_version =    D_VERSION,
  693         .d_flags =      0,
  694         .d_open =       xptopen,
  695         .d_close =      xptclose,
  696         .d_ioctl =      xptioctl,
  697         .d_name =       "xpt",
  698 };
  699 
  700 
   701 /* Storage for debugging data structures */
  702 #ifdef  CAMDEBUG
  703 struct cam_path *cam_dpath;
  704 u_int32_t cam_dflags;
  705 u_int32_t cam_debug_delay;
  706 #endif
  707 
  708 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
  709 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
  710 #endif
  711 
  712 /*
  713  * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
  714  * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
  715  * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
  716  */
  717 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
  718     || defined(CAM_DEBUG_LUN)
  719 #ifdef CAMDEBUG
  720 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
  721     || !defined(CAM_DEBUG_LUN)
  722 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
  723         and CAM_DEBUG_LUN"
  724 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
  725 #else /* !CAMDEBUG */
  726 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
  727 #endif /* CAMDEBUG */
  728 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
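       /*
        * An example kernel configuration fragment satisfying the rules
        * above (the bus/target/lun values are illustrative):
        *
        *      options CAMDEBUG
        *      options CAM_DEBUG_BUS=0
        *      options CAM_DEBUG_TARGET=1
        *      options CAM_DEBUG_LUN=0
        *      options CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_CDB)
        */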
  729 
  730 /* Our boot-time initialization hook */
  731 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
  732 
  733 static moduledata_t cam_moduledata = {
  734         "cam",
  735         cam_module_event_handler,
  736         NULL
  737 };
  738 
  739 static int      xpt_init(void *);
  740 
  741 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
  742 MODULE_VERSION(cam, 1);
  743 
  744 
  745 static cam_status       xpt_compile_path(struct cam_path *new_path,
  746                                          struct cam_periph *perph,
  747                                          path_id_t path_id,
  748                                          target_id_t target_id,
  749                                          lun_id_t lun_id);
  750 
  751 static void             xpt_release_path(struct cam_path *path);
  752 
  753 static void             xpt_async_bcast(struct async_list *async_head,
  754                                         u_int32_t async_code,
  755                                         struct cam_path *path,
  756                                         void *async_arg);
  757 static void             xpt_dev_async(u_int32_t async_code,
  758                                       struct cam_eb *bus,
  759                                       struct cam_et *target,
  760                                       struct cam_ed *device,
  761                                       void *async_arg);
  762 static path_id_t xptnextfreepathid(void);
  763 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
  764 static union ccb *xpt_get_ccb(struct cam_ed *device);
  765 static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
  766                                   u_int32_t new_priority);
  767 static void      xpt_run_dev_allocq(struct cam_eb *bus);
  768 static void      xpt_run_dev_sendq(struct cam_eb *bus);
  769 static timeout_t xpt_release_devq_timeout;
  770 static void      xpt_release_simq_timeout(void *arg) __unused;
  771 static void      xpt_release_bus(struct cam_eb *bus);
  772 static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
  773                                          int run_queue);
  774 static struct cam_et*
  775                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
  776 static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
  777 static struct cam_ed*
  778                  xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
  779                                   lun_id_t lun_id);
  780 static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
  781                                     struct cam_ed *device);
  782 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
  783 static struct cam_eb*
  784                  xpt_find_bus(path_id_t path_id);
  785 static struct cam_et*
  786                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
  787 static struct cam_ed*
  788                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
  789 static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
  790 static void      xpt_scan_lun(struct cam_periph *periph,
  791                               struct cam_path *path, cam_flags flags,
  792                               union ccb *ccb);
  793 static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
  794 static xpt_busfunc_t    xptconfigbuscountfunc;
  795 static xpt_busfunc_t    xptconfigfunc;
  796 static void      xpt_config(void *arg);
  797 static xpt_devicefunc_t xptpassannouncefunc;
  798 static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
  799 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
  800 static void      xptpoll(struct cam_sim *sim);
  801 static void      camisr(void *);
  802 static void      camisr_runqueue(void *);
  803 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
  804                                     u_int num_patterns, struct cam_eb *bus);
  805 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
  806                                        u_int num_patterns,
  807                                        struct cam_ed *device);
  808 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
  809                                        u_int num_patterns,
  810                                        struct cam_periph *periph);
  811 static xpt_busfunc_t    xptedtbusfunc;
  812 static xpt_targetfunc_t xptedttargetfunc;
  813 static xpt_devicefunc_t xptedtdevicefunc;
  814 static xpt_periphfunc_t xptedtperiphfunc;
  815 static xpt_pdrvfunc_t   xptplistpdrvfunc;
  816 static xpt_periphfunc_t xptplistperiphfunc;
  817 static int              xptedtmatch(struct ccb_dev_match *cdm);
  818 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
  819 static int              xptbustraverse(struct cam_eb *start_bus,
  820                                        xpt_busfunc_t *tr_func, void *arg);
  821 static int              xpttargettraverse(struct cam_eb *bus,
  822                                           struct cam_et *start_target,
  823                                           xpt_targetfunc_t *tr_func, void *arg);
  824 static int              xptdevicetraverse(struct cam_et *target,
  825                                           struct cam_ed *start_device,
  826                                           xpt_devicefunc_t *tr_func, void *arg);
  827 static int              xptperiphtraverse(struct cam_ed *device,
  828                                           struct cam_periph *start_periph,
  829                                           xpt_periphfunc_t *tr_func, void *arg);
  830 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
  831                                         xpt_pdrvfunc_t *tr_func, void *arg);
  832 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
  833                                             struct cam_periph *start_periph,
  834                                             xpt_periphfunc_t *tr_func,
  835                                             void *arg);
  836 static xpt_busfunc_t    xptdefbusfunc;
  837 static xpt_targetfunc_t xptdeftargetfunc;
  838 static xpt_devicefunc_t xptdefdevicefunc;
  839 static xpt_periphfunc_t xptdefperiphfunc;
  840 static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
  841 static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
  842                                             void *arg);
  843 static xpt_devicefunc_t xptsetasyncfunc;
  844 static xpt_busfunc_t    xptsetasyncbusfunc;
  845 static cam_status       xptregister(struct cam_periph *periph,
  846                                     void *arg);
  847 static cam_status       proberegister(struct cam_periph *periph,
  848                                       void *arg);
  849 static void      probeschedule(struct cam_periph *probe_periph);
  850 static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
  851 static void      proberequestdefaultnegotiation(struct cam_periph *periph);
  852 static int       proberequestbackoff(struct cam_periph *periph,
  853                                      struct cam_ed *device);
  854 static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
  855 static void      probecleanup(struct cam_periph *periph);
  856 static void      xpt_find_quirk(struct cam_ed *device);
  857 static void      xpt_devise_transport(struct cam_path *path);
  858 static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
  859                                            struct cam_ed *device,
  860                                            int async_update);
  861 static void      xpt_toggle_tags(struct cam_path *path);
  862 static void      xpt_start_tags(struct cam_path *path);
  863 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
  864                                             struct cam_ed *dev);
  865 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
  866                                            struct cam_ed *dev);
  867 static __inline int periph_is_queued(struct cam_periph *periph);
  868 static __inline int device_is_alloc_queued(struct cam_ed *device);
  869 static __inline int device_is_send_queued(struct cam_ed *device);
  870 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
  871 
  872 static __inline int
  873 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
  874 {
  875         int retval;
  876 
  877         if (dev->ccbq.devq_openings > 0) {
  878                 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
  879                         cam_ccbq_resize(&dev->ccbq,
  880                                         dev->ccbq.dev_openings
  881                                         + dev->ccbq.dev_active);
  882                         dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
  883                 }
  884                 /*
  885                  * The priority of a device waiting for CCB resources
  886                  * is that of the highest priority peripheral driver
  887                  * enqueued.
  888                  */
  889                 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
  890                                           &dev->alloc_ccb_entry.pinfo,
  891                                           CAMQ_GET_HEAD(&dev->drvq)->priority);
  892         } else {
  893                 retval = 0;
  894         }
  895 
  896         return (retval);
  897 }
  898 
  899 static __inline int
  900 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
  901 {
  902         int     retval;
  903 
  904         if (dev->ccbq.dev_openings > 0) {
  905                 /*
  906                  * The priority of a device waiting for controller
  907                  * resources is that of the highest priority CCB
  908                  * enqueued.
  909                  */
  910                 retval =
  911                     xpt_schedule_dev(&bus->sim->devq->send_queue,
  912                                      &dev->send_ccb_entry.pinfo,
  913                                      CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
  914         } else {
  915                 retval = 0;
  916         }
  917         return (retval);
  918 }
  919 
  920 static __inline int
  921 periph_is_queued(struct cam_periph *periph)
  922 {
  923         return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
  924 }
  925 
  926 static __inline int
  927 device_is_alloc_queued(struct cam_ed *device)
  928 {
  929         return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  930 }
  931 
  932 static __inline int
  933 device_is_send_queued(struct cam_ed *device)
  934 {
  935         return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
  936 }
  937 
  938 static __inline int
  939 dev_allocq_is_runnable(struct cam_devq *devq)
  940 {
  941         /*
  942          * Have work to do.
  943          * Have space to do more work.
  944          * Allowed to do work.
  945          */
  946         return ((devq->alloc_queue.qfrozen_cnt == 0)
  947              && (devq->alloc_queue.entries > 0)
  948              && (devq->alloc_openings > 0));
  949 }
  950 
  951 static void
  952 xpt_periph_init()
  953 {
  954         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
  955 }
  956 
  957 static void
  958 probe_periph_init()
  959 {
  960 }
  961 
  962 
  963 static void
  964 xptdone(struct cam_periph *periph, union ccb *done_ccb)
  965 {
  966         /* Caller will release the CCB */
  967         wakeup(&done_ccb->ccb_h.cbfcnp);
  968 }
  969 
  970 static int
  971 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
  972 {
  973 
  974         /*
  975          * Only allow read-write access.
  976          */
  977         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
  978                 return(EPERM);
  979 
  980         /*
  981          * We don't allow nonblocking access.
  982          */
  983         if ((flags & O_NONBLOCK) != 0) {
  984                 printf("%s: can't do nonblocking access\n", devtoname(dev));
  985                 return(ENODEV);
  986         }
  987 
  988         /* Mark ourselves open */
  989         mtx_lock(&xsoftc.xpt_lock);
  990         xsoftc.flags |= XPT_FLAG_OPEN;
  991         mtx_unlock(&xsoftc.xpt_lock);
  992 
  993         return(0);
  994 }
  995 
  996 static int
  997 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
  998 {
  999 
 1000         /* Mark ourselves closed */
 1001         mtx_lock(&xsoftc.xpt_lock);
 1002         xsoftc.flags &= ~XPT_FLAG_OPEN;
 1003         mtx_unlock(&xsoftc.xpt_lock);
 1004 
 1005         return(0);
 1006 }
 1007 
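       /*
        * Userland reaches the handlers below through /dev/xpt0.  A rough
        * sketch of issuing an XPT_PATH_INQ via the CAMIOCOMMAND ioctl
        * handled in xptioctl() (hypothetical fragment; error handling
        * elided):
        *
        *      union ccb ccb;
        *      int fd = open("/dev/xpt0", O_RDWR);
        *
        *      bzero(&ccb, sizeof(ccb));
        *      ccb.ccb_h.func_code = XPT_PATH_INQ;
        *      ccb.ccb_h.path_id = 0;
        *      ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
        *      ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
        *      if (ioctl(fd, CAMIOCOMMAND, &ccb) == 0)
        *              printf("SIM vendor: %s\n", ccb.cpi.sim_vid);
        */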
 1008 /*
 1009  * Don't automatically grab the xpt softc lock here even though this is going
 1010  * through the xpt device.  The xpt device is really just a back door for
 1011  * accessing other devices and SIMs, so the right thing to do is to grab
 1012  * the appropriate SIM lock once the bus/SIM is located.
 1013  */
 1014 static int
 1015 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
 1016 {
 1017         int error;
 1018 
 1019         error = 0;
 1020 
 1021         switch(cmd) {
 1022         /*
 1023          * For the transport layer CAMIOCOMMAND ioctl, we really only want
 1024          * to accept CCB types that don't quite make sense to send through a
 1025          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
 1026          * in the CAM spec.
 1027          */
 1028         case CAMIOCOMMAND: {
 1029                 union ccb *ccb;
 1030                 union ccb *inccb;
 1031                 struct cam_eb *bus;
 1032 
 1033                 inccb = (union ccb *)addr;
 1034 
 1035                 bus = xpt_find_bus(inccb->ccb_h.path_id);
 1036                 if (bus == NULL) {
 1037                         error = EINVAL;
 1038                         break;
 1039                 }
 1040 
 1041                 switch(inccb->ccb_h.func_code) {
 1042                 case XPT_SCAN_BUS:
 1043                 case XPT_RESET_BUS:
 1044                         if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
 1045                          || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
 1046                                 error = EINVAL;
 1047                                 break;
 1048                         }
 1049                         /* FALLTHROUGH */
 1050                 case XPT_PATH_INQ:
 1051                 case XPT_ENG_INQ:
 1052                 case XPT_SCAN_LUN:
 1053 
 1054                         ccb = xpt_alloc_ccb();
 1055 
 1056                         CAM_SIM_LOCK(bus->sim);
 1057 
 1058                         /*
 1059                          * Create a path using the bus, target, and lun the
 1060                          * user passed in.
 1061                          */
 1062                         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
 1063                                             inccb->ccb_h.path_id,
 1064                                             inccb->ccb_h.target_id,
 1065                                             inccb->ccb_h.target_lun) !=
 1066                                             CAM_REQ_CMP){
 1067                                 error = EINVAL;
 1068                                 CAM_SIM_UNLOCK(bus->sim);
 1069                                 xpt_free_ccb(ccb);
 1070                                 break;
 1071                         }
 1072                         /* Ensure all of our fields are correct */
 1073                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
 1074                                       inccb->ccb_h.pinfo.priority);
 1075                         xpt_merge_ccb(ccb, inccb);
 1076                         ccb->ccb_h.cbfcnp = xptdone;
 1077                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
 1078                         bcopy(ccb, inccb, sizeof(union ccb));
 1079                         xpt_free_path(ccb->ccb_h.path);
 1080                         xpt_free_ccb(ccb);
 1081                         CAM_SIM_UNLOCK(bus->sim);
 1082                         break;
 1083 
 1084                 case XPT_DEBUG: {
 1085                         union ccb ccb;
 1086 
 1087                         /*
 1088                          * This is an immediate CCB, so it's okay to
 1089                          * allocate it on the stack.
 1090                          */
 1091 
 1092                         CAM_SIM_LOCK(bus->sim);
 1093 
 1094                         /*
 1095                          * Create a path using the bus, target, and lun the
 1096                          * user passed in.
 1097                          */
 1098                         if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
 1099                                             inccb->ccb_h.path_id,
 1100                                             inccb->ccb_h.target_id,
 1101                                             inccb->ccb_h.target_lun) !=
 1102                                             CAM_REQ_CMP){
 1103                                 error = EINVAL;
 1104                                 CAM_SIM_UNLOCK(bus->sim);
 1105                                 break;
 1106                         }
 1107                         /* Ensure all of our fields are correct */
 1108                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
 1109                                       inccb->ccb_h.pinfo.priority);
 1110                         xpt_merge_ccb(&ccb, inccb);
 1111                         ccb.ccb_h.cbfcnp = xptdone;
 1112                         xpt_action(&ccb);
 1113                         CAM_SIM_UNLOCK(bus->sim);
 1114                         bcopy(&ccb, inccb, sizeof(union ccb));
 1115                         xpt_free_path(ccb.ccb_h.path);
 1116                         break;
 1117 
 1118                 }
 1119                 case XPT_DEV_MATCH: {
 1120                         struct cam_periph_map_info mapinfo;
 1121                         struct cam_path *old_path;
 1122 
 1123                         /*
 1124                          * We can't deal with physical addresses for this
 1125                          * type of transaction.
 1126                          */
 1127                         if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
 1128                                 error = EINVAL;
 1129                                 break;
 1130                         }
 1131 
 1132                         /*
 1133                          * Save this in case the caller had it set to
 1134                          * something in particular.
 1135                          */
 1136                         old_path = inccb->ccb_h.path;
 1137 
 1138                         /*
 1139                          * We really don't need a path for the matching
 1140                          * code.  The path is needed because of the
 1141                          * debugging statements in xpt_action().  They
 1142                          * assume that the CCB has a valid path.
 1143                          */
 1144                         inccb->ccb_h.path = xpt_periph->path;
 1145 
 1146                         bzero(&mapinfo, sizeof(mapinfo));
 1147 
 1148                         /*
 1149                          * Map the pattern and match buffers into kernel
 1150                          * virtual address space.
 1151                          */
 1152                         error = cam_periph_mapmem(inccb, &mapinfo);
 1153 
 1154                         if (error) {
 1155                                 inccb->ccb_h.path = old_path;
 1156                                 break;
 1157                         }
 1158 
 1159                         /*
 1160                          * This is an immediate CCB, we can send it on directly.
 1161                          */
 1162                         xpt_action(inccb);
 1163 
 1164                         /*
 1165                          * Map the buffers back into user space.
 1166                          */
 1167                         cam_periph_unmapmem(inccb, &mapinfo);
 1168 
 1169                         inccb->ccb_h.path = old_path;
 1170 
 1171                         error = 0;
 1172                         break;
 1173                 }
 1174                 default:
 1175                         error = ENOTSUP;
 1176                         break;
 1177                 }
 1178                 xpt_release_bus(bus);
 1179                 break;
 1180         }
 1181         /*
  1182          * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb
  1183          * as input, with the peripheral driver name and unit number
  1184          * filled in.  The other fields don't really matter as input.
  1185          * The passthrough driver name ("pass") and unit number are
  1186          * passed back in the ccb, along with the current device
  1187          * generation number, the index into the device peripheral
  1188          * driver list, and the status.  Note that since we do
  1189          * everything in one pass, unlike the XPT_GDEVLIST ccb, we never
  1190          * return a status of CAM_GDEVLIST_LIST_CHANGED.  It is (or
  1191          * rather should be) impossible for the peripheral driver list
  1192          * to change, since we look at the whole thing in one pass and
  1193          * we do it with lock protection.
 1194          */
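               /*
                * A rough userland sketch of the lookup described above,
                * asking which passthrough device fronts "da0" (hypothetical
                * fragment; error handling elided -- libcam's
                * cam_open_device() wraps this for applications):
                *
                *      union ccb ccb;
                *      int fd = open("/dev/xpt0", O_RDWR);
                *
                *      bzero(&ccb, sizeof(ccb));
                *      ccb.ccb_h.func_code = XPT_GDEVLIST;
                *      strlcpy(ccb.cgdl.periph_name, "da",
                *          sizeof(ccb.cgdl.periph_name));
                *      ccb.cgdl.unit_number = 0;
                *      if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
                *          ccb.ccb_h.status == CAM_REQ_CMP)
                *              printf("/dev/%s%d\n", ccb.cgdl.periph_name,
                *                  ccb.cgdl.unit_number);
                */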
 1195         case CAMGETPASSTHRU: {
 1196                 union ccb *ccb;
 1197                 struct cam_periph *periph;
 1198                 struct periph_driver **p_drv;
 1199                 char   *name;
 1200                 u_int unit;
 1201                 u_int cur_generation;
 1202                 int base_periph_found;
 1203                 int splbreaknum;
 1204 
 1205                 ccb = (union ccb *)addr;
 1206                 unit = ccb->cgdl.unit_number;
 1207                 name = ccb->cgdl.periph_name;
 1208                 /*
 1209                  * Every 100 devices, we want to drop our lock protection to
 1210                  * give the software interrupt handler a chance to run.
 1211                  * Most systems won't run into this check, but this should
 1212                  * avoid starvation in the software interrupt handler in
 1213                  * large systems.
 1214                  */
 1215                 splbreaknum = 100;
 1216 
 1217                 ccb = (union ccb *)addr;
 1218 
 1219                 base_periph_found = 0;
 1220 
 1221                 /*
 1222                  * Sanity check -- make sure we don't get a null peripheral
 1223                  * driver name.
 1224                  */
 1225                 if (*ccb->cgdl.periph_name == '\0') {
 1226                         error = EINVAL;
 1227                         break;
 1228                 }
 1229 
 1230                 /* Keep the list from changing while we traverse it */
 1231                 mtx_lock(&xsoftc.xpt_topo_lock);
 1232 ptstartover:
 1233                 cur_generation = xsoftc.xpt_generation;
 1234 
 1235                 /* first find our driver in the list of drivers */
 1236                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
 1237                         if (strcmp((*p_drv)->driver_name, name) == 0)
 1238                                 break;
 1239 
 1240                 if (*p_drv == NULL) {
 1241                         mtx_unlock(&xsoftc.xpt_topo_lock);
 1242                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1243                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1244                         *ccb->cgdl.periph_name = '\0';
 1245                         ccb->cgdl.unit_number = 0;
 1246                         error = ENOENT;
 1247                         break;
 1248                 }
 1249 
 1250                 /*
 1251                  * Run through every peripheral instance of this driver
 1252                  * and check to see whether it matches the unit passed
 1253                  * in by the user.  If it does, get out of the loops and
 1254                  * find the passthrough driver associated with that
 1255                  * peripheral driver.
 1256                  */
 1257                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
 1258                      periph = TAILQ_NEXT(periph, unit_links)) {
 1259 
 1260                         if (periph->unit_number == unit) {
 1261                                 break;
 1262                         } else if (--splbreaknum == 0) {
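                                      /*
                                       * Drop and immediately reacquire the
                                       * topology lock so that threads blocked
                                       * on it, such as the software interrupt
                                       * handler, get a chance to run.
                                       */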
 1263                                 mtx_unlock(&xsoftc.xpt_topo_lock);
 1264                                 mtx_lock(&xsoftc.xpt_topo_lock);
 1265                                 splbreaknum = 100;
 1266                                 if (cur_generation != xsoftc.xpt_generation)
 1267                                        goto ptstartover;
 1268                         }
 1269                 }
 1270                 /*
 1271                  * If we found the peripheral driver that the user passed
 1272                  * in, go through all of the peripheral drivers for that
 1273                  * particular device and look for a passthrough driver.
 1274                  */
 1275                 if (periph != NULL) {
 1276                         struct cam_ed *device;
 1277                         int i;
 1278 
 1279                         base_periph_found = 1;
 1280                         device = periph->path->device;
 1281                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
 1282                              periph != NULL;
 1283                              periph = SLIST_NEXT(periph, periph_links), i++) {
 1284                                 /*
 1285                                  * Check to see whether we have a
 1286                                  * passthrough device or not.
 1287                                  */
 1288                                 if (strcmp(periph->periph_name, "pass") == 0) {
 1289                                         /*
 1290                                          * Fill in the getdevlist fields.
 1291                                          */
 1292                                         strcpy(ccb->cgdl.periph_name,
 1293                                                periph->periph_name);
 1294                                         ccb->cgdl.unit_number =
 1295                                                 periph->unit_number;
 1296                                         if (SLIST_NEXT(periph, periph_links))
 1297                                                 ccb->cgdl.status =
 1298                                                         CAM_GDEVLIST_MORE_DEVS;
 1299                                         else
 1300                                                 ccb->cgdl.status =
 1301                                                        CAM_GDEVLIST_LAST_DEVICE;
 1302                                         ccb->cgdl.generation =
 1303                                                 device->generation;
 1304                                         ccb->cgdl.index = i;
 1305                                         /*
 1306                                          * Fill in some CCB header fields
 1307                                          * that the user may want.
 1308                                          */
 1309                                         ccb->ccb_h.path_id =
 1310                                                 periph->path->bus->path_id;
 1311                                         ccb->ccb_h.target_id =
 1312                                                 periph->path->target->target_id;
 1313                                         ccb->ccb_h.target_lun =
 1314                                                 periph->path->device->lun_id;
 1315                                         ccb->ccb_h.status = CAM_REQ_CMP;
 1316                                         break;
 1317                                 }
 1318                         }
 1319                 }
 1320 
 1321                 /*
 1322                  * If the periph is null here, one of two things has
 1323                  * happened.  The first possibility is that we couldn't
 1324                  * find the unit number of the particular peripheral driver
 1325                  * that the user is asking about.  e.g. the user asks for
 1326                  * the passthrough driver for "da11".  We find the list of
 1327                  * "da" peripherals all right, but there is no unit 11.
 1328                  * The other possibility is that we went through the list
 1329                  * of peripheral drivers attached to the device structure,
 1330                  * but didn't find one with the name "pass".  Either way,
 1331                  * we return ENOENT, since we couldn't find something.
 1332                  */
 1333                 if (periph == NULL) {
 1334                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 1335                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
 1336                         *ccb->cgdl.periph_name = '\0';
 1337                         ccb->cgdl.unit_number = 0;
 1338                         error = ENOENT;
 1339                         /*
 1340                          * It is unfortunate that this is even necessary,
 1341                          * but there are many, many clueless users out there.
 1342                          * If base_periph_found is set, the user is looking
 1343                          * for the passthrough driver, but doesn't have one
 1344                          * compiled into his kernel.
 1345                          */
 1346                         if (base_periph_found == 1) {
 1347                                 printf("xptioctl: pass driver is not in the "
 1348                                        "kernel\n");
 1349                                 printf("xptioctl: put \"device pass\" in "
 1350                                        "your kernel config file\n");
 1351                         }
 1352                 }
 1353                 mtx_unlock(&xsoftc.xpt_topo_lock);
 1354                 break;
 1355                 }
 1356         default:
 1357                 error = ENOTTY;
 1358                 break;
 1359         }
 1360 
 1361         return(error);
 1362 }
 1363 
 1364 static int
 1365 cam_module_event_handler(module_t mod, int what, void *arg)
 1366 {
 1367         int error;
 1368 
 1369         switch (what) {
 1370         case MOD_LOAD:
 1371                 if ((error = xpt_init(NULL)) != 0)
 1372                         return (error);
 1373                 break;
 1374         case MOD_UNLOAD:
 1375                 return EBUSY;
 1376         default:
 1377                 return EOPNOTSUPP;
 1378         }
 1379 
 1380         return 0;
 1381 }
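      /*
       * The handler above is wired up through the usual kernel module
       * machinery; a sketch of that registration (the real moduledata_t
       * lives elsewhere in this file) looks like:
       *
       *      static moduledata_t cam_moduledata = {
       *              "cam",
       *              cam_module_event_handler,
       *              NULL
       *      };
       *      DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE,
       *          SI_ORDER_SECOND);
       */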
 1382 
 1383 /* thread to handle bus rescans */
 1384 static void
 1385 xpt_scanner_thread(void *dummy)
 1386 {
 1387         cam_isrq_t      queue;
 1388         union ccb       *ccb;
 1389         struct cam_sim  *sim;
 1390 
 1391         for (;;) {
 1392                 /*
 1393                  * Wait for a rescan request to come in.  When it does, splice
 1394                  * it onto a queue from local storage so that the xpt lock
 1395                  * doesn't need to be held while the requests are being
 1396                  * processed.
 1397                  */
 1398                 xpt_lock_buses();
 1399                 msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
 1400                     "ccb_scanq", 0);
 1401                 TAILQ_INIT(&queue);
 1402                 TAILQ_CONCAT(&queue, &xsoftc.ccb_scanq, sim_links.tqe);
 1403                 xpt_unlock_buses();
 1404 
 1405                 while ((ccb = (union ccb *)TAILQ_FIRST(&queue)) != NULL) {
 1406                         TAILQ_REMOVE(&queue, &ccb->ccb_h, sim_links.tqe);
 1407 
 1408                         sim = ccb->ccb_h.path->bus->sim;
 1409                         CAM_SIM_LOCK(sim);
 1410 
 1411                         ccb->ccb_h.func_code = XPT_SCAN_BUS;
 1412                         ccb->ccb_h.cbfcnp = xptdone;
 1413                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
 1414                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
 1415                         xpt_free_path(ccb->ccb_h.path);
 1416                         xpt_free_ccb(ccb);
 1417                         CAM_SIM_UNLOCK(sim);
 1418                 }
 1419         }
 1420 }
 1421 
 1422 void
 1423 xpt_rescan(union ccb *ccb)
 1424 {
 1425         struct ccb_hdr *hdr;
 1426 
 1427         /*
 1428          * Don't make duplicate entries for the same paths.
 1429          */
 1430         xpt_lock_buses();
 1431         TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
 1432                 if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
 1433                         xpt_unlock_buses();
 1434                         xpt_print(ccb->ccb_h.path, "rescan already queued\n");
 1435                         xpt_free_path(ccb->ccb_h.path);
 1436                         xpt_free_ccb(ccb);
 1437                         return;
 1438                 }
 1439         }
 1440         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
 1441         wakeup(&xsoftc.ccb_scanq);
 1442         xpt_unlock_buses();
 1443 }
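      /*
       * Illustrative caller sketch (an assumption, not code from this
       * file): a driver that wants a bus rescanned hands ownership of a
       * ccb and its path to xpt_rescan(), roughly like this, where "sim"
       * is the caller's SIM.
       *
       *      union ccb *ccb = xpt_alloc_ccb_nowait();
       *
       *      if (ccb == NULL)
       *              return;
       *      if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
       *          cam_sim_path(sim), CAM_TARGET_WILDCARD,
       *          CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
       *              xpt_free_ccb(ccb);
       *              return;
       *      }
       *      xpt_rescan(ccb);
       */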
 1444 
 1445 /* Functions accessed by the peripheral drivers */
 1446 static int
 1447 xpt_init(void *dummy)
 1448 {
 1449         struct cam_sim *xpt_sim;
 1450         struct cam_path *path;
 1451         struct cam_devq *devq;
 1452         cam_status status;
 1453 
 1454         TAILQ_INIT(&xsoftc.xpt_busses);
 1455         TAILQ_INIT(&cam_simq);
 1456         TAILQ_INIT(&xsoftc.ccb_scanq);
 1457         STAILQ_INIT(&xsoftc.highpowerq);
 1458         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
 1459 
 1460         mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
 1461         mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
 1462         mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
 1463 
 1464         /*
 1465          * The xpt layer is, itself, the equivalent of a SIM.
 1466          * Allow 16 ccbs in the ccb pool for it.  This should
 1467          * give decent parallelism when we probe busses and
 1468          * perform other XPT functions.
 1469          */
 1470         devq = cam_simq_alloc(16);
 1471         xpt_sim = cam_sim_alloc(xptaction,
 1472                                 xptpoll,
 1473                                 "xpt",
 1474                                 /*softc*/NULL,
 1475                                 /*unit*/0,
 1476                                 /*mtx*/&xsoftc.xpt_lock,
 1477                                 /*max_dev_transactions*/0,
 1478                                 /*max_tagged_dev_transactions*/0,
 1479                                 devq);
 1480         if (xpt_sim == NULL)
 1481                 return (ENOMEM);
 1482 
 1483         xpt_sim->max_ccbs = 16;
 1484 
 1485         mtx_lock(&xsoftc.xpt_lock);
 1486         if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
 1487                 printf("xpt_init: xpt_bus_register failed with status %#x,"
 1488                        " failing attach\n", status);
 1489                 mtx_unlock(&xsoftc.xpt_lock);
 1490                 return (EINVAL);
 1491         }
 1492         /*
 1493          * Looking at the XPT from the SIM layer, the XPT is
 1494          * the equivalent of a peripheral driver.  Allocate
 1495          * a peripheral driver entry for us.
 1496          */
 1497         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
 1498                                       CAM_TARGET_WILDCARD,
 1499                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
 1500                 printf("xpt_init: xpt_create_path failed with status %#x,"
 1501                        " failing attach\n", status);
 1502                 mtx_unlock(&xsoftc.xpt_lock);
 1503                 return (EINVAL);
 1504         }
 1505         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
 1506                          path, NULL, 0, xpt_sim);
 1507         xpt_free_path(path);
 1508         mtx_unlock(&xsoftc.xpt_lock);
 1509 
 1510         /*
 1511          * Register a callback for when interrupts are enabled.
 1512          */
 1513         xsoftc.xpt_config_hook =
 1514             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
 1515                                               M_CAMXPT, M_NOWAIT | M_ZERO);
 1516         if (xsoftc.xpt_config_hook == NULL) {
 1517                 printf("xpt_init: Cannot malloc config hook "
 1518                        "- failing attach\n");
 1519                 return (ENOMEM);
 1520         }
 1521 
 1522         xsoftc.xpt_config_hook->ich_func = xpt_config;
 1523         if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
 1524                 free(xsoftc.xpt_config_hook, M_CAMXPT);
 1525                 printf("xpt_init: config_intrhook_establish failed "
 1526                        "- failing attach\n");
 1527                 return (ENOMEM);
 1528         }
 1529         /* fire up rescan thread */
 1530         if (kthread_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
 1531                 printf("xpt_init: failed to create rescan thread\n");
 1532         }
 1533         /* Install our software interrupt handlers */
 1534         swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
 1535 
 1536         return (0);
 1537 }
 1538 
 1539 static cam_status
 1540 xptregister(struct cam_periph *periph, void *arg)
 1541 {
 1542         struct cam_sim *xpt_sim;
 1543 
 1544         if (periph == NULL) {
 1545                 printf("xptregister: periph was NULL!!\n");
 1546                 return(CAM_REQ_CMP_ERR);
 1547         }
 1548 
 1549         xpt_sim = (struct cam_sim *)arg;
 1550         xpt_sim->softc = periph;
 1551         xpt_periph = periph;
 1552         periph->softc = NULL;
 1553 
 1554         return(CAM_REQ_CMP);
 1555 }
 1556 
 1557 int32_t
 1558 xpt_add_periph(struct cam_periph *periph)
 1559 {
 1560         struct cam_ed *device;
 1561         int32_t  status;
 1562         struct periph_list *periph_head;
 1563 
 1564         mtx_assert(periph->sim->mtx, MA_OWNED);
 1565 
 1566         device = periph->path->device;
 1567 
 1568         status = CAM_REQ_CMP;
 1569 
 1570         if (device != NULL) {
 1571                 periph_head = &device->periphs;
 1572 
 1573                 /*
 1574                  * Make room for this peripheral
 1575                  * so it will fit in the queue
 1576                  * when it's scheduled to run.
 1577                  */
 1578                 status = camq_resize(&device->drvq,
 1579                                      device->drvq.array_size + 1);
 1580 
 1581                 device->generation++;
 1582 
 1583                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
 1584         }
 1585 
 1586         mtx_lock(&xsoftc.xpt_topo_lock);
 1587         xsoftc.xpt_generation++;
 1588         mtx_unlock(&xsoftc.xpt_topo_lock);
 1589 
 1590         return (status);
 1591 }
 1592 
 1593 void
 1594 xpt_remove_periph(struct cam_periph *periph)
 1595 {
 1596         struct cam_ed *device;
 1597 
 1598         mtx_assert(periph->sim->mtx, MA_OWNED);
 1599 
 1600         device = periph->path->device;
 1601 
 1602         if (device != NULL) {
 1603                 struct periph_list *periph_head;
 1604 
 1605                 periph_head = &device->periphs;
 1606 
 1607                 /* Release the slot for this peripheral */
 1608                 camq_resize(&device->drvq, device->drvq.array_size - 1);
 1609 
 1610                 device->generation++;
 1611 
 1612                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
 1613         }
 1614 
 1615         mtx_lock(&xsoftc.xpt_topo_lock);
 1616         xsoftc.xpt_generation++;
 1617         mtx_unlock(&xsoftc.xpt_topo_lock);
 1618 }
 1619 
 1621 void
 1622 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
 1623 {
 1624         struct  ccb_pathinq cpi;
 1625         struct  ccb_trans_settings cts;
 1626         struct  cam_path *path;
 1627         u_int   speed;
 1628         u_int   freq;
 1629         u_int   mb;
 1630 
 1631         mtx_assert(periph->sim->mtx, MA_OWNED);
 1632 
 1633         path = periph->path;
 1634         /*
 1635          * Print the announcement in one piece so that it is not
 1636          * interleaved with other console output.
 1637          */
 1638         printf("%s%d at %s%d bus %d target %d lun %d\n",
 1639                periph->periph_name, periph->unit_number,
 1640                path->bus->sim->sim_name,
 1641                path->bus->sim->unit_number,
 1642                path->bus->sim->bus_id,
 1643                path->target->target_id,
 1644                path->device->lun_id);
 1645         printf("%s%d: ", periph->periph_name, periph->unit_number);
 1646         scsi_print_inquiry(&path->device->inq_data);
 1647         if (bootverbose && path->device->serial_num_len > 0) {
 1648                 /* Don't wrap the screen  - print only the first 60 chars */
 1649                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
 1650                        periph->unit_number, path->device->serial_num);
 1651         }
 1652         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 1653         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 1654         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 1655         xpt_action((union ccb*)&cts);
 1656         if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 1657                 return;
 1658         }
 1659 
 1660         /* Ask the SIM for its base transfer speed */
 1661         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 1662         cpi.ccb_h.func_code = XPT_PATH_INQ;
 1663         xpt_action((union ccb *)&cpi);
 1664 
 1665         speed = cpi.base_transfer_speed;
 1666         freq = 0;
 1667         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1668                 struct  ccb_trans_settings_spi *spi;
 1669 
 1670                 spi = &cts.xport_specific.spi;
 1671                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
 1672                   && spi->sync_offset != 0) {
 1673                         freq = scsi_calc_syncsrate(spi->sync_period);
 1674                         speed = freq;
 1675                 }
 1676 
 1677                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
 1678                         speed *= (0x01 << spi->bus_width);
 1679         }
 1680 
 1681         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1682                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
 1683                 if (fc->valid & CTS_FC_VALID_SPEED) {
 1684                         speed = fc->bitrate;
 1685                 }
 1686         }
 1687 
 1688         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
 1689                 struct  ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
 1690                 if (sas->valid & CTS_SAS_VALID_SPEED) {
 1691                         speed = sas->bitrate;
 1692                 }
 1693         }
 1694 
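              /*
               * "speed" is in KB/s here.  For example, an Ultra160 SPI
               * target negotiated at 80.000MHz DT on a 16-bit bus gives
               * freq = 80000 and speed = 160000, which prints below as
               * "160.000MB/s transfers (80.000MHz DT, ...)".
               */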
 1695         mb = speed / 1000;
 1696         if (mb > 0)
 1697                 printf("%s%d: %d.%03dMB/s transfers",
 1698                        periph->periph_name, periph->unit_number,
 1699                        mb, speed % 1000);
 1700         else
 1701                 printf("%s%d: %dKB/s transfers", periph->periph_name,
 1702                        periph->unit_number, speed);
 1703         /* Report additional information about SPI connections */
 1704         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
 1705                 struct  ccb_trans_settings_spi *spi;
 1706 
 1707                 spi = &cts.xport_specific.spi;
 1708                 if (freq != 0) {
 1709                         printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
 1710                                freq % 1000,
 1711                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
 1712                              ? " DT" : "",
 1713                                spi->sync_offset);
 1714                 }
 1715                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
 1716                  && spi->bus_width > 0) {
 1717                         if (freq != 0) {
 1718                                 printf(", ");
 1719                         } else {
 1720                                 printf(" (");
 1721                         }
 1722                         printf("%dbit)", 8 * (0x01 << spi->bus_width));
 1723                 } else if (freq != 0) {
 1724                         printf(")");
 1725                 }
 1726         }
 1727         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
 1728                 struct  ccb_trans_settings_fc *fc;
 1729 
 1730                 fc = &cts.xport_specific.fc;
 1731                 if (fc->valid & CTS_FC_VALID_WWNN)
 1732                         printf(" WWNN 0x%llx", (long long) fc->wwnn);
 1733                 if (fc->valid & CTS_FC_VALID_WWPN)
 1734                         printf(" WWPN 0x%llx", (long long) fc->wwpn);
 1735                 if (fc->valid & CTS_FC_VALID_PORT)
 1736                         printf(" PortID 0x%x", fc->port);
 1737         }
 1738 
 1739         if (path->device->inq_flags & SID_CmdQue
 1740          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
 1741                 printf("\n%s%d: Command Queueing Enabled",
 1742                        periph->periph_name, periph->unit_number);
 1743         }
 1744         printf("\n");
 1745 
 1746         /*
 1747          * We only want to print the caller's announce string if they've
 1748          * passed one in.
 1749          */
 1750         if (announce_string != NULL)
 1751                 printf("%s%d: %s\n", periph->periph_name,
 1752                        periph->unit_number, announce_string);
 1753 }
 1754 
 1755 static dev_match_ret
 1756 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1757             struct cam_eb *bus)
 1758 {
 1759         dev_match_ret retval;
 1760         int i;
 1761 
 1762         retval = DM_RET_NONE;
 1763 
 1764         /*
 1765          * If we aren't given something to match against, that's an error.
 1766          */
 1767         if (bus == NULL)
 1768                 return(DM_RET_ERROR);
 1769 
 1770         /*
 1771          * If there are no match entries, then this bus matches no
 1772          * matter what.
 1773          */
 1774         if ((patterns == NULL) || (num_patterns == 0))
 1775                 return(DM_RET_DESCEND | DM_RET_COPY);
 1776 
 1777         for (i = 0; i < num_patterns; i++) {
 1778                 struct bus_match_pattern *cur_pattern;
 1779 
 1780                 /*
 1781                  * If the pattern in question isn't for a bus node, we
 1782                  * aren't interested.  However, we do indicate to the
 1783                  * calling routine that we should continue descending the
 1784                  * tree, since the user wants to match against lower-level
 1785                  * EDT elements.
 1786                  */
 1787                 if (patterns[i].type != DEV_MATCH_BUS) {
 1788                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1789                                 retval |= DM_RET_DESCEND;
 1790                         continue;
 1791                 }
 1792 
 1793                 cur_pattern = &patterns[i].pattern.bus_pattern;
 1794 
 1795                 /*
 1796                  * If they want to match any bus node, we give them any
 1797                  * bus node.
 1798                  */
 1799                 if (cur_pattern->flags == BUS_MATCH_ANY) {
 1800                         /* set the copy flag */
 1801                         retval |= DM_RET_COPY;
 1802 
 1803                         /*
 1804                          * If we've already decided on an action, go ahead
 1805                          * and return.
 1806                          */
 1807                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1808                                 return(retval);
 1809                 }
 1810 
 1811                 /*
 1812                  * Not sure why someone would do this...
 1813                  */
 1814                 if (cur_pattern->flags == BUS_MATCH_NONE)
 1815                         continue;
 1816 
 1817                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
 1818                  && (cur_pattern->path_id != bus->path_id))
 1819                         continue;
 1820 
 1821                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
 1822                  && (cur_pattern->bus_id != bus->sim->bus_id))
 1823                         continue;
 1824 
 1825                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
 1826                  && (cur_pattern->unit_number != bus->sim->unit_number))
 1827                         continue;
 1828 
 1829                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
 1830                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
 1831                              DEV_IDLEN) != 0))
 1832                         continue;
 1833 
 1834                 /*
 1835                  * If we get to this point, the user definitely wants
 1836                  * information on this bus.  So tell the caller to copy the
 1837                  * data out.
 1838                  */
 1839                 retval |= DM_RET_COPY;
 1840 
 1841                 /*
 1842                  * If the return action has been set to descend, then we
 1843                  * know that we've already seen a non-bus matching
 1844                  * expression, therefore we need to further descend the tree.
 1845                  * This won't change by continuing around the loop, so we
 1846                  * go ahead and return.  If we haven't seen a non-bus
 1847                  * matching expression, we keep going around the loop until
 1848                  * we exhaust the matching expressions.  We'll set the stop
 1849                  * flag once we fall out of the loop.
 1850                  */
 1851                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1852                         return(retval);
 1853         }
 1854 
 1855         /*
 1856          * If the return action hasn't been set to descend yet, that means
 1857          * we haven't seen anything other than bus matching patterns.  So
 1858          * tell the caller to stop descending the tree -- the user doesn't
 1859          * want to match against lower level tree elements.
 1860          */
 1861         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1862                 retval |= DM_RET_STOP;
 1863 
 1864         return(retval);
 1865 }
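      /*
       * Illustrative sketch of a caller-supplied pattern (an assumption,
       * not code from this file): matching any bus served by an "ahc" SIM
       * only requires BUS_MATCH_NAME.
       *
       *      struct dev_match_pattern pat;
       *
       *      bzero(&pat, sizeof(pat));
       *      pat.type = DEV_MATCH_BUS;
       *      pat.pattern.bus_pattern.flags = BUS_MATCH_NAME;
       *      strlcpy(pat.pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
       */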
 1866 
 1867 static dev_match_ret
 1868 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
 1869                struct cam_ed *device)
 1870 {
 1871         dev_match_ret retval;
 1872         int i;
 1873 
 1874         retval = DM_RET_NONE;
 1875 
 1876         /*
 1877          * If we aren't given something to match against, that's an error.
 1878          */
 1879         if (device == NULL)
 1880                 return(DM_RET_ERROR);
 1881 
 1882         /*
 1883          * If there are no match entries, then this device matches no
 1884          * matter what.
 1885          */
 1886         if ((patterns == NULL) || (num_patterns == 0))
 1887                 return(DM_RET_DESCEND | DM_RET_COPY);
 1888 
 1889         for (i = 0; i < num_patterns; i++) {
 1890                 struct device_match_pattern *cur_pattern;
 1891 
 1892                 /*
 1893                  * If the pattern in question isn't for a device node, we
 1894                  * aren't interested.
 1895                  */
 1896                 if (patterns[i].type != DEV_MATCH_DEVICE) {
 1897                         if ((patterns[i].type == DEV_MATCH_PERIPH)
 1898                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
 1899                                 retval |= DM_RET_DESCEND;
 1900                         continue;
 1901                 }
 1902 
 1903                 cur_pattern = &patterns[i].pattern.device_pattern;
 1904 
 1905                 /*
 1906                  * If they want to match any device node, we give them any
 1907                  * device node.
 1908                  */
 1909                 if (cur_pattern->flags == DEV_MATCH_ANY) {
 1910                         /* set the copy flag */
 1911                         retval |= DM_RET_COPY;
 1912 
 1914                         /*
 1915                          * If we've already decided on an action, go ahead
 1916                          * and return.
 1917                          */
 1918                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
 1919                                 return(retval);
 1920                 }
 1921 
 1922                 /*
 1923                  * Not sure why someone would do this...
 1924                  */
 1925                 if (cur_pattern->flags == DEV_MATCH_NONE)
 1926                         continue;
 1927 
 1928                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
 1929                  && (cur_pattern->path_id != device->target->bus->path_id))
 1930                         continue;
 1931 
 1932                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
 1933                  && (cur_pattern->target_id != device->target->target_id))
 1934                         continue;
 1935 
 1936                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
 1937                  && (cur_pattern->target_lun != device->lun_id))
 1938                         continue;
 1939 
 1940                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
 1941                  && (cam_quirkmatch((caddr_t)&device->inq_data,
 1942                                     (caddr_t)&cur_pattern->inq_pat,
 1943                                     1, sizeof(cur_pattern->inq_pat),
 1944                                     scsi_static_inquiry_match) == NULL))
 1945                         continue;
 1946 
 1947                 /*
 1948                  * If we get to this point, the user definitely wants
 1949                  * information on this device.  So tell the caller to copy
 1950                  * the data out.
 1951                  */
 1952                 retval |= DM_RET_COPY;
 1953 
 1954                 /*
 1955                  * If the return action has been set to descend, then we
 1956                  * know that we've already seen a peripheral matching
 1957                  * expression, therefore we need to further descend the tree.
 1958                  * This won't change by continuing around the loop, so we
 1959                  * go ahead and return.  If we haven't seen a peripheral
 1960                  * matching expression, we keep going around the loop until
 1961                  * we exhaust the matching expressions.  We'll set the stop
 1962                  * flag once we fall out of the loop.
 1963                  */
 1964                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 1965                         return(retval);
 1966         }
 1967 
 1968         /*
 1969          * If the return action hasn't been set to descend yet, that means
 1970          * we haven't seen any peripheral matching patterns.  So tell the
 1971          * caller to stop descending the tree -- the user doesn't want to
 1972          * match against lower level tree elements.
 1973          */
 1974         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 1975                 retval |= DM_RET_STOP;
 1976 
 1977         return(retval);
 1978 }
 1979 
 1980 /*
 1981  * Match a single peripheral against any number of match patterns.
 1982  */
 1983 static dev_match_ret
 1984 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
 1985                struct cam_periph *periph)
 1986 {
 1987         dev_match_ret retval;
 1988         int i;
 1989 
 1990         /*
 1991          * If we aren't given something to match against, that's an error.
 1992          */
 1993         if (periph == NULL)
 1994                 return(DM_RET_ERROR);
 1995 
 1996         /*
 1997          * If there are no match entries, then this peripheral matches no
 1998          * matter what.
 1999          */
 2000         if ((patterns == NULL) || (num_patterns == 0))
 2001                 return(DM_RET_STOP | DM_RET_COPY);
 2002 
 2003         /*
 2004          * There aren't any nodes below a peripheral node, so there's no
 2005          * reason to descend the tree any further.
 2006          */
 2007         retval = DM_RET_STOP;
 2008 
 2009         for (i = 0; i < num_patterns; i++) {
 2010                 struct periph_match_pattern *cur_pattern;
 2011 
 2012                 /*
 2013                  * If the pattern in question isn't for a peripheral, we
 2014                  * aren't interested.
 2015                  */
 2016                 if (patterns[i].type != DEV_MATCH_PERIPH)
 2017                         continue;
 2018 
 2019                 cur_pattern = &patterns[i].pattern.periph_pattern;
 2020 
 2021                 /*
 2022                  * If they want to match on anything, then we will do so.
 2023                  */
 2024                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
 2025                         /* set the copy flag */
 2026                         retval |= DM_RET_COPY;
 2027 
 2028                         /*
 2029                          * We've already set the return action to stop,
 2030                          * since there are no nodes below peripherals in
 2031                          * the tree.
 2032                          */
 2033                         return(retval);
 2034                 }
 2035 
 2036                 /*
 2037                  * Not sure why someone would do this...
 2038                  */
 2039                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
 2040                         continue;
 2041 
 2042                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
 2043                  && (cur_pattern->path_id != periph->path->bus->path_id))
 2044                         continue;
 2045 
 2046                 /*
 2047          * For the target and lun IDs, we have to make sure the
 2048                  * target and lun pointers aren't NULL.  The xpt peripheral
 2049                  * has a wildcard target and device.
 2050                  */
 2051                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
 2052                  && ((periph->path->target == NULL)
 2053                  ||(cur_pattern->target_id != periph->path->target->target_id)))
 2054                         continue;
 2055 
 2056                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
 2057                  && ((periph->path->device == NULL)
 2058                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
 2059                         continue;
 2060 
 2061                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
 2062                  && (cur_pattern->unit_number != periph->unit_number))
 2063                         continue;
 2064 
 2065                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
 2066                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
 2067                              DEV_IDLEN) != 0))
 2068                         continue;
 2069 
 2070                 /*
 2071                  * If we get to this point, the user definitely wants
 2072                  * information on this peripheral.  So tell the caller to
 2073                  * copy the data out.
 2074                  */
 2075                 retval |= DM_RET_COPY;
 2076 
 2077                 /*
 2078                  * The return action has already been set to stop, since
 2079                  * peripherals don't have any nodes below them in the EDT.
 2080                  */
 2081                 return(retval);
 2082         }
 2083 
 2084         /*
 2085          * If we get to this point, the peripheral that was passed in
 2086          * doesn't match any of the patterns.
 2087          */
 2088         return(retval);
 2089 }
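      /*
       * Sketch of a peripheral pattern (again an assumption for
       * exposition): matching every instance of the "pass" driver,
       * regardless of bus, target, or lun, uses PERIPH_MATCH_NAME alone.
       *
       *      struct dev_match_pattern pat;
       *
       *      bzero(&pat, sizeof(pat));
       *      pat.type = DEV_MATCH_PERIPH;
       *      pat.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
       *      strlcpy(pat.pattern.periph_pattern.periph_name, "pass",
       *          DEV_IDLEN);
       */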
 2090 
 2091 static int
 2092 xptedtbusfunc(struct cam_eb *bus, void *arg)
 2093 {
 2094         struct ccb_dev_match *cdm;
 2095         dev_match_ret retval;
 2096 
 2097         cdm = (struct ccb_dev_match *)arg;
 2098 
 2099         /*
 2100          * If our position is for something deeper in the tree, that means
 2101          * that we've already seen this node.  So, we keep going down.
 2102          */
 2103         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2104          && (cdm->pos.cookie.bus == bus)
 2105          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2106          && (cdm->pos.cookie.target != NULL))
 2107                 retval = DM_RET_DESCEND;
 2108         else
 2109                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 2110 
 2111         /*
 2112          * If we got an error, bail out of the search.
 2113          */
 2114         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2115                 cdm->status = CAM_DEV_MATCH_ERROR;
 2116                 return(0);
 2117         }
 2118 
 2119         /*
 2120          * If the copy flag is set, copy this bus out.
 2121          */
 2122         if (retval & DM_RET_COPY) {
 2123                 int spaceleft, j;
 2124 
 2125                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2126                         sizeof(struct dev_match_result));
 2127 
 2128                 /*
 2129                  * If we don't have enough space to put in another
 2130                  * match result, save our position and tell the
 2131                  * user there are more devices to check.
 2132                  */
 2133                 if (spaceleft < sizeof(struct dev_match_result)) {
 2134                         bzero(&cdm->pos, sizeof(cdm->pos));
 2135                         cdm->pos.position_type =
 2136                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
 2137 
 2138                         cdm->pos.cookie.bus = bus;
 2139                         cdm->pos.generations[CAM_BUS_GENERATION] =
 2140                                 xsoftc.bus_generation;
 2141                         cdm->status = CAM_DEV_MATCH_MORE;
 2142                         return(0);
 2143                 }
 2144                 j = cdm->num_matches;
 2145                 cdm->num_matches++;
 2146                 cdm->matches[j].type = DEV_MATCH_BUS;
 2147                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
 2148                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
 2149                 cdm->matches[j].result.bus_result.unit_number =
 2150                         bus->sim->unit_number;
 2151                 strncpy(cdm->matches[j].result.bus_result.dev_name,
 2152                         bus->sim->sim_name, DEV_IDLEN);
 2153         }
 2154 
 2155         /*
 2156          * If the user is only interested in busses, there's no
 2157          * reason to descend to the next level in the tree.
 2158          */
 2159         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2160                 return(1);
 2161 
 2162         /*
 2163          * If there is a target generation recorded, check it to
 2164          * make sure the target list hasn't changed.
 2165          */
 2166         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2167          && (bus == cdm->pos.cookie.bus)
 2168          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2169          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
 2170          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
 2171              bus->generation)) {
 2172                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2173                 return(0);
 2174         }
 2175 
 2176         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2177          && (cdm->pos.cookie.bus == bus)
 2178          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2179          && (cdm->pos.cookie.target != NULL))
 2180                 return(xpttargettraverse(bus,
 2181                                         (struct cam_et *)cdm->pos.cookie.target,
 2182                                          xptedttargetfunc, arg));
 2183         else
 2184                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
 2185 }
 2186 
 2187 static int
 2188 xptedttargetfunc(struct cam_et *target, void *arg)
 2189 {
 2190         struct ccb_dev_match *cdm;
 2191 
 2192         cdm = (struct ccb_dev_match *)arg;
 2193 
 2194         /*
 2195          * If there is a device list generation recorded, check it to
 2196          * make sure the device list hasn't changed.
 2197          */
 2198         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2199          && (cdm->pos.cookie.bus == target->bus)
 2200          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2201          && (cdm->pos.cookie.target == target)
 2202          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2203          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
 2204          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
 2205              target->generation)) {
 2206                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2207                 return(0);
 2208         }
 2209 
 2210         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2211          && (cdm->pos.cookie.bus == target->bus)
 2212          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2213          && (cdm->pos.cookie.target == target)
 2214          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2215          && (cdm->pos.cookie.device != NULL))
 2216                 return(xptdevicetraverse(target,
 2217                                         (struct cam_ed *)cdm->pos.cookie.device,
 2218                                          xptedtdevicefunc, arg));
 2219         else
 2220                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
 2221 }
 2222 
 2223 static int
 2224 xptedtdevicefunc(struct cam_ed *device, void *arg)
 2225 {
 2226 
 2227         struct ccb_dev_match *cdm;
 2228         dev_match_ret retval;
 2229 
 2230         cdm = (struct ccb_dev_match *)arg;
 2231 
 2232         /*
 2233          * If our position is for something deeper in the tree, that means
 2234          * that we've already seen this node.  So, we keep going down.
 2235          */
 2236         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2237          && (cdm->pos.cookie.device == device)
 2238          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2239          && (cdm->pos.cookie.periph != NULL))
 2240                 retval = DM_RET_DESCEND;
 2241         else
 2242                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
 2243                                         device);
 2244 
 2245         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2246                 cdm->status = CAM_DEV_MATCH_ERROR;
 2247                 return(0);
 2248         }
 2249 
 2250         /*
 2251          * If the copy flag is set, copy this device out.
 2252          */
 2253         if (retval & DM_RET_COPY) {
 2254                 int spaceleft, j;
 2255 
 2256                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2257                         sizeof(struct dev_match_result));
 2258 
 2259                 /*
 2260                  * If we don't have enough space to put in another
 2261                  * match result, save our position and tell the
 2262                  * user there are more devices to check.
 2263                  */
 2264                 if (spaceleft < sizeof(struct dev_match_result)) {
 2265                         bzero(&cdm->pos, sizeof(cdm->pos));
 2266                         cdm->pos.position_type =
 2267                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2268                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
 2269 
 2270                         cdm->pos.cookie.bus = device->target->bus;
 2271                         cdm->pos.generations[CAM_BUS_GENERATION] =
 2272                                 xsoftc.bus_generation;
 2273                         cdm->pos.cookie.target = device->target;
 2274                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2275                                 device->target->bus->generation;
 2276                         cdm->pos.cookie.device = device;
 2277                         cdm->pos.generations[CAM_DEV_GENERATION] =
 2278                                 device->target->generation;
 2279                         cdm->status = CAM_DEV_MATCH_MORE;
 2280                         return(0);
 2281                 }
 2282                 j = cdm->num_matches;
 2283                 cdm->num_matches++;
 2284                 cdm->matches[j].type = DEV_MATCH_DEVICE;
 2285                 cdm->matches[j].result.device_result.path_id =
 2286                         device->target->bus->path_id;
 2287                 cdm->matches[j].result.device_result.target_id =
 2288                         device->target->target_id;
 2289                 cdm->matches[j].result.device_result.target_lun =
 2290                         device->lun_id;
 2291                 bcopy(&device->inq_data,
 2292                       &cdm->matches[j].result.device_result.inq_data,
 2293                       sizeof(struct scsi_inquiry_data));
 2294 
 2295                 /* Let the user know whether this device is unconfigured */
 2296                 if (device->flags & CAM_DEV_UNCONFIGURED)
 2297                         cdm->matches[j].result.device_result.flags =
 2298                                 DEV_RESULT_UNCONFIGURED;
 2299                 else
 2300                         cdm->matches[j].result.device_result.flags =
 2301                                 DEV_RESULT_NOFLAG;
 2302         }
 2303 
 2304         /*
 2305          * If the user isn't interested in peripherals, don't descend
 2306          * the tree any further.
 2307          */
 2308         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 2309                 return(1);
 2310 
 2311         /*
 2312          * If there is a peripheral list generation recorded, make sure
 2313          * it hasn't changed.
 2314          */
 2315         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2316          && (device->target->bus == cdm->pos.cookie.bus)
 2317          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2318          && (device->target == cdm->pos.cookie.target)
 2319          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2320          && (device == cdm->pos.cookie.device)
 2321          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2322          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2323          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2324              device->generation)) {
 2325                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2326                 return(0);
 2327         }
 2328 
 2329         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2330          && (cdm->pos.cookie.bus == device->target->bus)
 2331          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
 2332          && (cdm->pos.cookie.target == device->target)
 2333          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
 2334          && (cdm->pos.cookie.device == device)
 2335          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2336          && (cdm->pos.cookie.periph != NULL))
 2337                 return(xptperiphtraverse(device,
 2338                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2339                                 xptedtperiphfunc, arg));
 2340         else
 2341                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
 2342 }
 2343 
 2344 static int
 2345 xptedtperiphfunc(struct cam_periph *periph, void *arg)
 2346 {
 2347         struct ccb_dev_match *cdm;
 2348         dev_match_ret retval;
 2349 
 2350         cdm = (struct ccb_dev_match *)arg;
 2351 
 2352         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2353 
 2354         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2355                 cdm->status = CAM_DEV_MATCH_ERROR;
 2356                 return(0);
 2357         }
 2358 
 2359         /*
 2360          * If the copy flag is set, copy this peripheral out.
 2361          */
 2362         if (retval & DM_RET_COPY) {
 2363                 int spaceleft, j;
 2364 
 2365                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2366                         sizeof(struct dev_match_result));
 2367 
 2368                 /*
 2369                  * If we don't have enough space to put in another
 2370                  * match result, save our position and tell the
 2371                  * user there are more devices to check.
 2372                  */
 2373                 if (spaceleft < sizeof(struct dev_match_result)) {
 2374                         bzero(&cdm->pos, sizeof(cdm->pos));
 2375                         cdm->pos.position_type =
 2376                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
 2377                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
 2378                                 CAM_DEV_POS_PERIPH;
 2379 
 2380                         cdm->pos.cookie.bus = periph->path->bus;
 2381                         cdm->pos.generations[CAM_BUS_GENERATION] =
 2382                                 xsoftc.bus_generation;
 2383                         cdm->pos.cookie.target = periph->path->target;
 2384                         cdm->pos.generations[CAM_TARGET_GENERATION] =
 2385                                 periph->path->bus->generation;
 2386                         cdm->pos.cookie.device = periph->path->device;
 2387                         cdm->pos.generations[CAM_DEV_GENERATION] =
 2388                                 periph->path->target->generation;
 2389                         cdm->pos.cookie.periph = periph;
 2390                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2391                                 periph->path->device->generation;
 2392                         cdm->status = CAM_DEV_MATCH_MORE;
 2393                         return(0);
 2394                 }
 2395 
 2396                 j = cdm->num_matches;
 2397                 cdm->num_matches++;
 2398                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2399                 cdm->matches[j].result.periph_result.path_id =
 2400                         periph->path->bus->path_id;
 2401                 cdm->matches[j].result.periph_result.target_id =
 2402                         periph->path->target->target_id;
 2403                 cdm->matches[j].result.periph_result.target_lun =
 2404                         periph->path->device->lun_id;
 2405                 cdm->matches[j].result.periph_result.unit_number =
 2406                         periph->unit_number;
 2407                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2408                         periph->periph_name, DEV_IDLEN);
 2409         }
 2410 
 2411         return(1);
 2412 }
 2413 
 2414 static int
 2415 xptedtmatch(struct ccb_dev_match *cdm)
 2416 {
 2417         int ret;
 2418 
 2419         cdm->num_matches = 0;
 2420 
 2421         /*
 2422          * Check the bus list generation.  If it has changed, the user
 2423          * needs to reset everything and start over.
 2424          */
 2425         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2426          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
 2427          && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
 2428                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2429                 return(0);
 2430         }
 2431 
 2432         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
 2433          && (cdm->pos.cookie.bus != NULL))
 2434                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
 2435                                      xptedtbusfunc, cdm);
 2436         else
 2437                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
 2438 
 2439         /*
 2440          * If we get back 0, that means that we had to stop before fully
 2441          * traversing the EDT.  It also means that one of the subroutines
 2442          * has set the status field to the proper value.  If we get back 1,
 2443          * we've fully traversed the EDT and copied out any matching entries.
 2444          */
 2445         if (ret == 1)
 2446                 cdm->status = CAM_DEV_MATCH_LAST;
 2447 
 2448         return(ret);
 2449 }
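      /*
       * Sketch of the expected caller loop (an assumption about userland
       * usage): the same XPT_DEV_MATCH ccb is resubmitted through the
       * CAMIOCOMMAND ioctl on /dev/xpt0 for as long as the returned status
       * is CAM_DEV_MATCH_MORE; cdm->pos carries the resume position
       * between calls.  consume() stands in for the caller's own result
       * handling.
       *
       *      do {
       *              if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
       *                      err(1, "CAMIOCOMMAND");
       *              consume(ccb.cdm.matches, ccb.cdm.num_matches);
       *      } while ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
       *          && ccb.cdm.status == CAM_DEV_MATCH_MORE);
       */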
 2450 
 2451 static int
 2452 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
 2453 {
 2454         struct ccb_dev_match *cdm;
 2455 
 2456         cdm = (struct ccb_dev_match *)arg;
 2457 
 2458         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2459          && (cdm->pos.cookie.pdrv == pdrv)
 2460          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2461          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
 2462          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
 2463              (*pdrv)->generation)) {
 2464                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
 2465                 return(0);
 2466         }
 2467 
 2468         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2469          && (cdm->pos.cookie.pdrv == pdrv)
 2470          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
 2471          && (cdm->pos.cookie.periph != NULL))
 2472                 return(xptpdperiphtraverse(pdrv,
 2473                                 (struct cam_periph *)cdm->pos.cookie.periph,
 2474                                 xptplistperiphfunc, arg));
 2475         else
 2476                 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
 2477 }
 2478 
 2479 static int
 2480 xptplistperiphfunc(struct cam_periph *periph, void *arg)
 2481 {
 2482         struct ccb_dev_match *cdm;
 2483         dev_match_ret retval;
 2484 
 2485         cdm = (struct ccb_dev_match *)arg;
 2486 
 2487         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
 2488 
 2489         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
 2490                 cdm->status = CAM_DEV_MATCH_ERROR;
 2491                 return(0);
 2492         }
 2493 
 2494         /*
 2495          * If the copy flag is set, copy this peripheral out.
 2496          */
 2497         if (retval & DM_RET_COPY) {
 2498                 int spaceleft, j;
 2499 
 2500                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
 2501                         sizeof(struct dev_match_result));
 2502 
 2503                 /*
 2504                  * If we don't have enough space to put in another
 2505                  * match result, save our position and tell the
 2506                  * user there are more devices to check.
 2507                  */
 2508                 if (spaceleft < sizeof(struct dev_match_result)) {
 2509                         struct periph_driver **pdrv;
 2510 
 2511                         pdrv = NULL;
 2512                         bzero(&cdm->pos, sizeof(cdm->pos));
 2513                         cdm->pos.position_type =
 2514                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
 2515                                 CAM_DEV_POS_PERIPH;
 2516 
 2517                         /*
 2518                          * This may look a bit nonsensical, but it is
 2519                          * actually quite logical.  There are very few
 2520                          * peripheral drivers, and bloating every peripheral
 2521                          * structure with a pointer back to its parent
 2522                          * peripheral driver linker set entry would cost
 2523                          * more in the long run than doing this quick lookup.
 2524                          */
 2525                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
 2526                                 if (strcmp((*pdrv)->driver_name,
 2527                                     periph->periph_name) == 0)
 2528                                         break;
 2529                         }
 2530 
 2531                         if (*pdrv == NULL) {
 2532                                 cdm->status = CAM_DEV_MATCH_ERROR;
 2533                                 return(0);
 2534                         }
 2535 
 2536                         cdm->pos.cookie.pdrv = pdrv;
 2537                         /*
 2538                          * The periph generation slot does double duty, as
 2539                          * does the periph pointer slot.  They are used for
 2540                          * both edt and pdrv lookups and positioning.
 2541                          */
 2542                         cdm->pos.cookie.periph = periph;
 2543                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
 2544                                 (*pdrv)->generation;
 2545                         cdm->status = CAM_DEV_MATCH_MORE;
 2546                         return(0);
 2547                 }
 2548 
 2549                 j = cdm->num_matches;
 2550                 cdm->num_matches++;
 2551                 cdm->matches[j].type = DEV_MATCH_PERIPH;
 2552                 cdm->matches[j].result.periph_result.path_id =
 2553                         periph->path->bus->path_id;
 2554 
 2555                 /*
 2556                  * The transport layer peripheral doesn't have a target or
 2557                  * lun.
 2558                  */
 2559                 if (periph->path->target)
 2560                         cdm->matches[j].result.periph_result.target_id =
 2561                                 periph->path->target->target_id;
 2562                 else
 2563                         cdm->matches[j].result.periph_result.target_id = -1;
 2564 
 2565                 if (periph->path->device)
 2566                         cdm->matches[j].result.periph_result.target_lun =
 2567                                 periph->path->device->lun_id;
 2568                 else
 2569                         cdm->matches[j].result.periph_result.target_lun = -1;
 2570 
 2571                 cdm->matches[j].result.periph_result.unit_number =
 2572                         periph->unit_number;
 2573                 strncpy(cdm->matches[j].result.periph_result.periph_name,
 2574                         periph->periph_name, DEV_IDLEN);
 2575         }
 2576 
 2577         return(1);
 2578 }
 2579 
 2580 static int
 2581 xptperiphlistmatch(struct ccb_dev_match *cdm)
 2582 {
 2583         int ret;
 2584 
 2585         cdm->num_matches = 0;
 2586 
 2587         /*
 2588          * At this point in the EDT traversal function, we check the bus
 2589          * list generation to make sure that no busses have been added or
 2590          * removed since the user last sent an XPT_DEV_MATCH ccb through.
 2591          * For the peripheral driver list traversal function, however, we
 2592          * don't have to worry about new peripheral driver types coming or
 2593          * going; they're in a linker set, and therefore can't change
 2594          * without a recompile.
 2595          */
 2596 
 2597         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
 2598          && (cdm->pos.cookie.pdrv != NULL))
 2599                 ret = xptpdrvtraverse(
 2600                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
 2601                                 xptplistpdrvfunc, cdm);
 2602         else
 2603                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
 2604 
 2605         /*
 2606          * If we get back 0, that means that we had to stop before fully
 2607          * traversing the peripheral driver tree.  It also means that one of
 2608          * the subroutines has set the status field to the proper value.  If
 2609          * we get back 1, we've fully traversed the peripheral driver
 2610          * lists and copied out any matching entries.
 2611          */
 2612         if (ret == 1)
 2613                 cdm->status = CAM_DEV_MATCH_LAST;
 2614 
 2615         return(ret);
 2616 }
 2617 
 2618 static int
 2619 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
 2620 {
 2621         struct cam_eb *bus, *next_bus;
 2622         int retval;
 2623 
 2624         retval = 1;
 2625 
 2626         mtx_lock(&xsoftc.xpt_topo_lock);
 2627         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
 2628              bus != NULL;
 2629              bus = next_bus) {
 2630                 next_bus = TAILQ_NEXT(bus, links);
 2631 
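                      /*
                       * next_bus was cached above so that the topology lock
                       * can be dropped here; the topology and SIM locks are
                       * thus never held at the same time across the callback.
                       */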
 2632                 mtx_unlock(&xsoftc.xpt_topo_lock);
 2633                 CAM_SIM_LOCK(bus->sim);
 2634                 retval = tr_func(bus, arg);
 2635                 CAM_SIM_UNLOCK(bus->sim);
 2636                 if (retval == 0)
 2637                         return(retval);
 2638                 mtx_lock(&xsoftc.xpt_topo_lock);
 2639         }
 2640         mtx_unlock(&xsoftc.xpt_topo_lock);
 2641 
 2642         return(retval);
 2643 }
 2644 
 2645 int
 2646 xpt_sim_opened(struct cam_sim *sim)
 2647 {
 2648         struct cam_eb *bus;
 2649         struct cam_et *target;
 2650         struct cam_ed *device;
 2651         struct cam_periph *periph;
 2652 
 2653         KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
 2654         mtx_assert(sim->mtx, MA_OWNED);
 2655 
 2656         mtx_lock(&xsoftc.xpt_topo_lock);
 2657         TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
 2658                 if (bus->sim != sim)
 2659                         continue;
 2660 
 2661                 TAILQ_FOREACH(target, &bus->et_entries, links) {
 2662                         TAILQ_FOREACH(device, &target->ed_entries, links) {
 2663                                 SLIST_FOREACH(periph, &device->periphs,
 2664                                     periph_links) {
 2665                                         if (periph->refcount > 0) {
 2666                                                 mtx_unlock(&xsoftc.xpt_topo_lock);
 2667                                                 return (1);
 2668                                         }
 2669                                 }
 2670                         }
 2671                 }
 2672         }
 2673 
 2674         mtx_unlock(&xsoftc.xpt_topo_lock);
 2675         return (0);
 2676 }
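
      /*
       * A hedged sketch of how a SIM driver might consult the function
       * above at detach time; the surrounding detach logic is hypothetical.
       */
      #if 0
      if (xpt_sim_opened(sim))
              return (EBUSY);         /* some peripheral still holds a ref */
      #endif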
 2677 
 2678 static int
 2679 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
 2680                   xpt_targetfunc_t *tr_func, void *arg)
 2681 {
 2682         struct cam_et *target, *next_target;
 2683         int retval;
 2684 
 2685         retval = 1;
 2686         for (target = (start_target ? start_target :
 2687                        TAILQ_FIRST(&bus->et_entries));
 2688              target != NULL; target = next_target) {
 2689 
 2690                 next_target = TAILQ_NEXT(target, links);
 2691 
 2692                 retval = tr_func(target, arg);
 2693 
 2694                 if (retval == 0)
 2695                         return(retval);
 2696         }
 2697 
 2698         return(retval);
 2699 }
 2700 
 2701 static int
 2702 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
 2703                   xpt_devicefunc_t *tr_func, void *arg)
 2704 {
 2705         struct cam_ed *device, *next_device;
 2706         int retval;
 2707 
 2708         retval = 1;
 2709         for (device = (start_device ? start_device :
 2710                        TAILQ_FIRST(&target->ed_entries));
 2711              device != NULL;
 2712              device = next_device) {
 2713 
 2714                 next_device = TAILQ_NEXT(device, links);
 2715 
 2716                 retval = tr_func(device, arg);
 2717 
 2718                 if (retval == 0)
 2719                         return(retval);
 2720         }
 2721 
 2722         return(retval);
 2723 }
 2724 
 2725 static int
 2726 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
 2727                   xpt_periphfunc_t *tr_func, void *arg)
 2728 {
 2729         struct cam_periph *periph, *next_periph;
 2730         int retval;
 2731 
 2732         retval = 1;
 2733 
 2734         for (periph = (start_periph ? start_periph :
 2735                        SLIST_FIRST(&device->periphs));
 2736              periph != NULL;
 2737              periph = next_periph) {
 2738 
 2739                 next_periph = SLIST_NEXT(periph, periph_links);
 2740 
 2741                 retval = tr_func(periph, arg);
 2742                 if (retval == 0)
 2743                         return(retval);
 2744         }
 2745 
 2746         return(retval);
 2747 }
 2748 
 2749 static int
 2750 xptpdrvtraverse(struct periph_driver **start_pdrv,
 2751                 xpt_pdrvfunc_t *tr_func, void *arg)
 2752 {
 2753         struct periph_driver **pdrv;
 2754         int retval;
 2755 
 2756         retval = 1;
 2757 
 2758         /*
 2759          * We don't traverse the peripheral driver list like we do the
 2760          * other lists, because it is a linker set, and therefore cannot be
 2761          * changed during runtime.  If the peripheral driver list is ever
 2762          * re-done to be something other than a linker set (i.e. it can
 2763          * change while the system is running), the list traversal should
 2764          * be modified to work like the other traversal functions.
 2765          */
 2766         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
 2767              *pdrv != NULL; pdrv++) {
 2768                 retval = tr_func(pdrv, arg);
 2769 
 2770                 if (retval == 0)
 2771                         return(retval);
 2772         }
 2773 
 2774         return(retval);
 2775 }
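
      /*
       * Entries land in the periph_drivers linker set at link time; a
       * driver typically declares itself along the lines below.  The names
       * here are hypothetical, patterned after the in-tree peripheral
       * drivers.
       */
      #if 0
      static struct periph_driver exampledriver =
      {
              exampleinit, "example",
              TAILQ_HEAD_INITIALIZER(exampledriver.units), /* generation */ 0
      };
      PERIPHDRIVER_DECLARE(example, exampledriver);
      #endif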
 2776 
 2777 static int
 2778 xptpdperiphtraverse(struct periph_driver **pdrv,
 2779                     struct cam_periph *start_periph,
 2780                     xpt_periphfunc_t *tr_func, void *arg)
 2781 {
 2782         struct cam_periph *periph, *next_periph;
 2783         int retval;
 2784 
 2785         retval = 1;
 2786 
 2787         for (periph = (start_periph ? start_periph :
 2788              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
 2789              periph = next_periph) {
 2790 
 2791                 next_periph = TAILQ_NEXT(periph, unit_links);
 2792 
 2793                 retval = tr_func(periph, arg);
 2794                 if (retval == 0)
 2795                         return(retval);
 2796         }
 2797         return(retval);
 2798 }
 2799 
 2800 static int
 2801 xptdefbusfunc(struct cam_eb *bus, void *arg)
 2802 {
 2803         struct xpt_traverse_config *tr_config;
 2804 
 2805         tr_config = (struct xpt_traverse_config *)arg;
 2806 
 2807         if (tr_config->depth == XPT_DEPTH_BUS) {
 2808                 xpt_busfunc_t *tr_func;
 2809 
 2810                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
 2811 
 2812                 return(tr_func(bus, tr_config->tr_arg));
 2813         } else
 2814                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
 2815 }
 2816 
 2817 static int
 2818 xptdeftargetfunc(struct cam_et *target, void *arg)
 2819 {
 2820         struct xpt_traverse_config *tr_config;
 2821 
 2822         tr_config = (struct xpt_traverse_config *)arg;
 2823 
 2824         if (tr_config->depth == XPT_DEPTH_TARGET) {
 2825                 xpt_targetfunc_t *tr_func;
 2826 
 2827                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
 2828 
 2829                 return(tr_func(target, tr_config->tr_arg));
 2830         } else
 2831                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
 2832 }
 2833 
 2834 static int
 2835 xptdefdevicefunc(struct cam_ed *device, void *arg)
 2836 {
 2837         struct xpt_traverse_config *tr_config;
 2838 
 2839         tr_config = (struct xpt_traverse_config *)arg;
 2840 
 2841         if (tr_config->depth == XPT_DEPTH_DEVICE) {
 2842                 xpt_devicefunc_t *tr_func;
 2843 
 2844                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
 2845 
 2846                 return(tr_func(device, tr_config->tr_arg));
 2847         } else
 2848                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
 2849 }
 2850 
 2851 static int
 2852 xptdefperiphfunc(struct cam_periph *periph, void *arg)
 2853 {
 2854         struct xpt_traverse_config *tr_config;
 2855         xpt_periphfunc_t *tr_func;
 2856 
 2857         tr_config = (struct xpt_traverse_config *)arg;
 2858 
 2859         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
 2860 
 2861         /*
 2862          * Unlike the other default functions, we don't check for depth
 2863          * here.  The peripheral driver level is the last level in the EDT,
 2864          * so if we're here, we should execute the function in question.
 2865          */
 2866         return(tr_func(periph, tr_config->tr_arg));
 2867 }
 2868 
 2869 /*
 2870  * Execute the given function for every bus in the EDT.
 2871  */
 2872 static int
 2873 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
 2874 {
 2875         struct xpt_traverse_config tr_config;
 2876 
 2877         tr_config.depth = XPT_DEPTH_BUS;
 2878         tr_config.tr_func = tr_func;
 2879         tr_config.tr_arg = arg;
 2880 
 2881         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2882 }
 2883 
 2884 /*
 2885  * Execute the given function for every device in the EDT.
 2886  */
 2887 static int
 2888 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
 2889 {
 2890         struct xpt_traverse_config tr_config;
 2891 
 2892         tr_config.depth = XPT_DEPTH_DEVICE;
 2893         tr_config.tr_func = tr_func;
 2894         tr_config.tr_arg = arg;
 2895 
 2896         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
 2897 }
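
      /*
       * A minimal, hypothetical use of the helper above.  The callback
       * must match xpt_devicefunc_t and follow the 0/1 return convention
       * described earlier.
       */
      #if 0
      static int
      example_count_device(struct cam_ed *device, void *arg)
      {
              (*(int *)arg)++;
              return (1);
      }

      /* ...and in the caller: */
      int ndevs = 0;
      xpt_for_all_devices(example_count_device, &ndevs);
      #endif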
 2898 
 2899 static int
 2900 xptsetasyncfunc(struct cam_ed *device, void *arg)
 2901 {
 2902         struct cam_path path;
 2903         struct ccb_getdev cgd;
 2904         struct async_node *cur_entry;
 2905 
 2906         cur_entry = (struct async_node *)arg;
 2907 
 2908         /*
 2909          * Don't report unconfigured devices (Wildcard devs,
 2910          * devices only for target mode, device instances
 2911          * that have been invalidated but are waiting for
 2912          * their last reference count to be released).
 2913          */
 2914         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
 2915                 return (1);
 2916 
 2917         xpt_compile_path(&path,
 2918                          NULL,
 2919                          device->target->bus->path_id,
 2920                          device->target->target_id,
 2921                          device->lun_id);
 2922         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
 2923         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 2924         xpt_action((union ccb *)&cgd);
 2925         cur_entry->callback(cur_entry->callback_arg,
 2926                             AC_FOUND_DEVICE,
 2927                             &path, &cgd);
 2928         xpt_release_path(&path);
 2929 
 2930         return(1);
 2931 }
 2932 
 2933 static int
 2934 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
 2935 {
 2936         struct cam_path path;
 2937         struct ccb_pathinq cpi;
 2938         struct async_node *cur_entry;
 2939 
 2940         cur_entry = (struct async_node *)arg;
 2941 
 2942         xpt_compile_path(&path, /*periph*/NULL,
 2943                          bus->sim->path_id,
 2944                          CAM_TARGET_WILDCARD,
 2945                          CAM_LUN_WILDCARD);
 2946         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 2947         cpi.ccb_h.func_code = XPT_PATH_INQ;
 2948         xpt_action((union ccb *)&cpi);
 2949         cur_entry->callback(cur_entry->callback_arg,
 2950                             AC_PATH_REGISTERED,
 2951                             &path, &cpi);
 2952         xpt_release_path(&path);
 2953 
 2954         return(1);
 2955 }
 2956 
 2957 static void
 2958 xpt_action_sasync_cb(void *context, int pending)
 2959 {
 2960         struct async_node *cur_entry;
 2961         struct xpt_task *task;
 2962         uint32_t added;
 2963 
 2964         task = (struct xpt_task *)context;
 2965         cur_entry = (struct async_node *)task->data1;
 2966         added = task->data2;
 2967 
 2968         if ((added & AC_FOUND_DEVICE) != 0) {
 2969                 /*
 2970                  * Get this peripheral up to date with all
 2971                  * the currently existing devices.
 2972                  */
 2973                 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
 2974         }
 2975         if ((added & AC_PATH_REGISTERED) != 0) {
 2976                 /*
 2977                  * Get this peripheral up to date with all
 2978                  * the currently existing busses.
 2979                  */
 2980                 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
 2981         }
 2982 
 2983         free(task, M_CAMXPT);
 2984 }
 2985 
 2986 void
 2987 xpt_action(union ccb *start_ccb)
 2988 {
 2989 
 2990         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
 2991 
 2992         start_ccb->ccb_h.status = CAM_REQ_INPROG;
 2993 
 2994         switch (start_ccb->ccb_h.func_code) {
 2995         case XPT_SCSI_IO:
 2996         {
 2997                 struct cam_ed *device;
 2998 #ifdef CAMDEBUG
 2999                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
 3000                 struct cam_path *path;
 3001 
 3002                 path = start_ccb->ccb_h.path;
 3003 #endif
 3004 
 3005                 /*
 3006                  * For the sake of compatibility with SCSI-1
 3007                  * devices that may not understand the identify
 3008                  * message, we include lun information in the
 3009                  * second byte of all commands.  SCSI-1 specifies
 3010                  * that luns are a 3 bit value and reserves only 3
 3011                  * bits for lun information in the CDB.  Later
 3012                  * revisions of the SCSI spec allow for more than 8
 3013                  * luns, but have deprecated lun information in the
 3014                  * CDB.  So, if the lun won't fit, we must omit it.
 3015                  *
 3016                  * Also be aware that during initial probing for devices,
 3017                  * the inquiry information is unknown but initialized to 0.
 3018                  * This means that this code will be exercised while probing
 3019                  * devices with an ANSI revision greater than 2.
 3020                  */
 3021                 device = start_ccb->ccb_h.path->device;
 3022                 if (device->protocol_version <= SCSI_REV_2
 3023                  && start_ccb->ccb_h.target_lun < 8
 3024                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
 3025 
 3026                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
 3027                             start_ccb->ccb_h.target_lun << 5;
 3028                 }
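                /*
                 * The OR above means, for example, that lun 3 on a SCSI-2
                 * or older device yields cdb_bytes[1] |= (3 << 5) == 0x60,
                 * leaving the low five bits of byte 1 free for the
                 * command's own use.
                 */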
 3029                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
 3030                 CAM_DEBUG(path, CAM_DEBUG_CDB, ("%s. CDB: %s\n",
 3031                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
 3032                                        &path->device->inq_data),
 3033                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
 3034                                           cdb_str, sizeof(cdb_str))));
 3035         }
 3036         /* FALLTHROUGH */
 3037         case XPT_TARGET_IO:
 3038         case XPT_CONT_TARGET_IO:
 3039                 start_ccb->csio.sense_resid = 0;
 3040                 start_ccb->csio.resid = 0;
 3041                 /* FALLTHROUGH */
 3042         case XPT_RESET_DEV:
 3043         case XPT_ENG_EXEC:
 3044         {
 3045                 struct cam_path *path;
 3046                 int runq;
 3047 
 3048                 path = start_ccb->ccb_h.path;
 3049 
 3050                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
 3051                 if (path->device->qfrozen_cnt == 0)
 3052                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
 3053                 else
 3054                         runq = 0;
 3055                 if (runq != 0)
 3056                         xpt_run_dev_sendq(path->bus);
 3057                 break;
 3058         }
 3059         case XPT_SET_TRAN_SETTINGS:
 3060         {
 3061                 xpt_set_transfer_settings(&start_ccb->cts,
 3062                                           start_ccb->ccb_h.path->device,
 3063                                           /*async_update*/FALSE);
 3064                 break;
 3065         }
 3066         case XPT_CALC_GEOMETRY:
 3067         {
 3068                 struct cam_sim *sim;
 3069 
 3070                 /* Filter out garbage */
 3071                 if (start_ccb->ccg.block_size == 0
 3072                  || start_ccb->ccg.volume_size == 0) {
 3073                         start_ccb->ccg.cylinders = 0;
 3074                         start_ccb->ccg.heads = 0;
 3075                         start_ccb->ccg.secs_per_track = 0;
 3076                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3077                         break;
 3078                 }
 3079 #ifdef PC98
 3080                 /*
 3081                  * In a PC-98 system, geometry translation depends on
 3082                  * the "real" device geometry obtained from mode page 4.
 3083                  * SCSI geometry translation is performed in the
 3084                  * initialization routine of the SCSI BIOS and the result
 3085                  * stored in host memory.  If the translation is available
 3086                  * in host memory, use it.  If not, rely on the default
 3087                  * translation the device driver performs.
 3088                  */
 3089                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
 3090                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3091                         break;
 3092                 }
 3093 #endif
 3094                 sim = start_ccb->ccb_h.path->bus->sim;
 3095                 (*(sim->sim_action))(sim, start_ccb);
 3096                 break;
 3097         }
 3098         case XPT_ABORT:
 3099         {
 3100                 union ccb* abort_ccb;
 3101 
 3102                 abort_ccb = start_ccb->cab.abort_ccb;
 3103                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
 3104 
 3105                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
 3106                                 struct cam_ccbq *ccbq;
 3107 
 3108                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
 3109                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
 3110                                 abort_ccb->ccb_h.status =
 3111                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 3112                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 3113                                 xpt_done(abort_ccb);
 3114                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3115                                 break;
 3116                         }
 3117                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
 3118                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
 3119                                 /*
 3120                                  * We've caught this ccb en route to
 3121                                  * the SIM.  Flag it for abort and the
 3122                                  * SIM will do so just before starting
 3123                                  * real work on the CCB.
 3124                                  */
 3125                                 abort_ccb->ccb_h.status =
 3126                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
 3127                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
 3128                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3129                                 break;
 3130                         }
 3131                 }
 3132                 if (XPT_FC_IS_QUEUED(abort_ccb)
 3133                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
 3134                         /*
 3135                          * It's already completed but waiting
 3136                          * for our SWI to get to it.
 3137                          */
 3138                         start_ccb->ccb_h.status = CAM_UA_ABORT;
 3139                         break;
 3140                 }
 3141                 /*
 3142                  * If we weren't able to take care of the abort request
 3143                  * in the XPT, pass the request down to the SIM for processing.
 3144                  */
 3145         }
 3146         /* FALLTHROUGH */
 3147         case XPT_ACCEPT_TARGET_IO:
 3148         case XPT_EN_LUN:
 3149         case XPT_IMMED_NOTIFY:
 3150         case XPT_NOTIFY_ACK:
 3151         case XPT_GET_TRAN_SETTINGS:
 3152         case XPT_RESET_BUS:
 3153         case XPT_IMMEDIATE_NOTIFY:
 3154         case XPT_NOTIFY_ACKNOWLEDGE:
 3155         case XPT_GET_SIM_KNOB:
 3156         case XPT_SET_SIM_KNOB:
 3157         {
 3158                 struct cam_sim *sim;
 3159 
 3160                 sim = start_ccb->ccb_h.path->bus->sim;
 3161                 (*(sim->sim_action))(sim, start_ccb);
 3162                 break;
 3163         }
 3164         case XPT_PATH_INQ:
 3165         {
 3166                 struct cam_sim *sim;
 3167 
 3168                 sim = start_ccb->ccb_h.path->bus->sim;
 3169                 (*(sim->sim_action))(sim, start_ccb);
 3170                 break;
 3171         }
 3172         case XPT_PATH_STATS:
 3173                 start_ccb->cpis.last_reset =
 3174                         start_ccb->ccb_h.path->bus->last_reset;
 3175                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3176                 break;
 3177         case XPT_GDEV_TYPE:
 3178         {
 3179                 struct cam_ed *dev;
 3180 
 3181                 dev = start_ccb->ccb_h.path->device;
 3182                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3183                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3184                 } else {
 3185                         struct ccb_getdev *cgd;
 3186                         struct cam_eb *bus;
 3187                         struct cam_et *tar;
 3188 
 3189                         cgd = &start_ccb->cgd;
 3190                         bus = cgd->ccb_h.path->bus;
 3191                         tar = cgd->ccb_h.path->target;
 3192                         cgd->inq_data = dev->inq_data;
 3193                         cgd->ccb_h.status = CAM_REQ_CMP;
 3194                         cgd->serial_num_len = dev->serial_num_len;
 3195                         if ((dev->serial_num_len > 0)
 3196                          && (dev->serial_num != NULL))
 3197                                 bcopy(dev->serial_num, cgd->serial_num,
 3198                                       dev->serial_num_len);
 3199                 }
 3200                 break;
 3201         }
 3202         case XPT_GDEV_STATS:
 3203         {
 3204                 struct cam_ed *dev;
 3205 
 3206                 dev = start_ccb->ccb_h.path->device;
 3207                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
 3208                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
 3209                 } else {
 3210                         struct ccb_getdevstats *cgds;
 3211                         struct cam_eb *bus;
 3212                         struct cam_et *tar;
 3213 
 3214                         cgds = &start_ccb->cgds;
 3215                         bus = cgds->ccb_h.path->bus;
 3216                         tar = cgds->ccb_h.path->target;
 3217                         cgds->dev_openings = dev->ccbq.dev_openings;
 3218                         cgds->dev_active = dev->ccbq.dev_active;
 3219                         cgds->devq_openings = dev->ccbq.devq_openings;
 3220                         cgds->devq_queued = dev->ccbq.queue.entries;
 3221                         cgds->held = dev->ccbq.held;
 3222                         cgds->last_reset = tar->last_reset;
 3223                         cgds->maxtags = dev->quirk->maxtags;
 3224                         cgds->mintags = dev->quirk->mintags;
 3225                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
 3226                                 cgds->last_reset = bus->last_reset;
 3227                         cgds->ccb_h.status = CAM_REQ_CMP;
 3228                 }
 3229                 break;
 3230         }
 3231         case XPT_GDEVLIST:
 3232         {
 3233                 struct cam_periph       *nperiph;
 3234                 struct periph_list      *periph_head;
 3235                 struct ccb_getdevlist   *cgdl;
 3236                 u_int                   i;
 3237                 struct cam_ed           *device;
 3238                 int                     found;
 3239 
 3240 
 3241                 found = 0;
 3242 
 3243                 /*
 3244                  * Don't want anyone mucking with our data.
 3245                  */
 3246                 device = start_ccb->ccb_h.path->device;
 3247                 periph_head = &device->periphs;
 3248                 cgdl = &start_ccb->cgdl;
 3249 
 3250                 /*
 3251                  * Check and see if the list has changed since the user
 3252                  * last requested a list member.  If so, tell them that the
 3253                  * list has changed, and therefore they need to start over
 3254                  * from the beginning.
 3255                  */
 3256                 if ((cgdl->index != 0) &&
 3257                     (cgdl->generation != device->generation)) {
 3258                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
 3259                         break;
 3260                 }
 3261 
 3262                 /*
 3263                  * Traverse the list of peripherals and attempt to find
 3264                  * the requested peripheral.
 3265                  */
 3266                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
 3267                      (nperiph != NULL) && (i <= cgdl->index);
 3268                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
 3269                         if (i == cgdl->index) {
 3270                                 strncpy(cgdl->periph_name,
 3271                                         nperiph->periph_name,
 3272                                         DEV_IDLEN);
 3273                                 cgdl->unit_number = nperiph->unit_number;
 3274                                 found = 1;
 3275                         }
 3276                 }
 3277                 if (found == 0) {
 3278                         cgdl->status = CAM_GDEVLIST_ERROR;
 3279                         break;
 3280                 }
 3281 
 3282                 if (nperiph == NULL)
 3283                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
 3284                 else
 3285                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
 3286 
 3287                 cgdl->index++;
 3288                 cgdl->generation = device->generation;
 3289 
 3290                 cgdl->ccb_h.status = CAM_REQ_CMP;
 3291                 break;
 3292         }
 3293         case XPT_DEV_MATCH:
 3294         {
 3295                 dev_pos_type position_type;
 3296                 struct ccb_dev_match *cdm;
 3297 
 3298                 cdm = &start_ccb->cdm;
 3299 
 3300                 /*
 3301                  * There are two ways of getting at information in the EDT.
 3302                  * The first way is via the primary EDT tree.  It starts
 3303                  * with a list of busses, then a list of targets on a bus,
 3304                  * then devices/luns on a target, and then peripherals on a
 3305                  * device/lun.  The "other" way is by the peripheral driver
 3306                  * lists.  The peripheral driver lists are organized by
 3307                  * peripheral driver (obviously), so it makes sense to
 3308                  * use the peripheral driver list if the user is looking
 3309                  * for something like "da1", or all "da" devices.  If the
 3310                  * user is looking for something on a particular bus/target
 3311                  * or lun, it's generally better to go through the EDT tree.
 3312                  */
 3313 
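                /*
                 * As a hedged illustration, a caller hunting for all "da"
                 * peripherals might submit a pattern like the hypothetical
                 * one below, which would be served by the peripheral driver
                 * lists:
                 *
                 *      struct dev_match_pattern pattern;
                 *
                 *      pattern.type = DEV_MATCH_PERIPH;
                 *      pattern.pattern.periph_pattern.flags =
                 *          PERIPH_MATCH_NAME;
                 *      strlcpy(pattern.pattern.periph_pattern.periph_name,
                 *          "da", DEV_IDLEN);
                 */
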
 3314                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
 3315                         position_type = cdm->pos.position_type;
 3316                 else {
 3317                         u_int i;
 3318 
 3319                         position_type = CAM_DEV_POS_NONE;
 3320 
 3321                         for (i = 0; i < cdm->num_patterns; i++) {
 3322                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
 3323                  || (cdm->patterns[i].type == DEV_MATCH_DEVICE)) {
 3324                                         position_type = CAM_DEV_POS_EDT;
 3325                                         break;
 3326                                 }
 3327                         }
 3328 
 3329                         if (cdm->num_patterns == 0)
 3330                                 position_type = CAM_DEV_POS_EDT;
 3331                         else if (position_type == CAM_DEV_POS_NONE)
 3332                                 position_type = CAM_DEV_POS_PDRV;
 3333                 }
 3334 
 3335                 switch (position_type & CAM_DEV_POS_TYPEMASK) {
 3336                 case CAM_DEV_POS_EDT:
 3337                         xptedtmatch(cdm);
 3338                         break;
 3339                 case CAM_DEV_POS_PDRV:
 3340                         xptperiphlistmatch(cdm);
 3341                         break;
 3342                 default:
 3343                         cdm->status = CAM_DEV_MATCH_ERROR;
 3344                         break;
 3345                 }
 3346 
 3347                 if (cdm->status == CAM_DEV_MATCH_ERROR)
 3348                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 3349                 else
 3350                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3351 
 3352                 break;
 3353         }
 3354         case XPT_SASYNC_CB:
 3355         {
 3356                 struct ccb_setasync *csa;
 3357                 struct async_node *cur_entry;
 3358                 struct async_list *async_head;
 3359                 u_int32_t added;
 3360 
 3361                 csa = &start_ccb->csa;
 3362                 added = csa->event_enable;
 3363                 async_head = &csa->ccb_h.path->device->asyncs;
 3364 
 3365                 /*
 3366                  * If there is already an entry for us, simply
 3367                  * update it.
 3368                  */
 3369                 cur_entry = SLIST_FIRST(async_head);
 3370                 while (cur_entry != NULL) {
 3371                         if ((cur_entry->callback_arg == csa->callback_arg)
 3372                          && (cur_entry->callback == csa->callback))
 3373                                 break;
 3374                         cur_entry = SLIST_NEXT(cur_entry, links);
 3375                 }
 3376 
 3377                 if (cur_entry != NULL) {
 3378                         /*
 3379                          * If the request has no flags set,
 3380                          * remove the entry.
 3381                          */
 3382                         added &= ~cur_entry->event_enable;
 3383                         if (csa->event_enable == 0) {
 3384                                 SLIST_REMOVE(async_head, cur_entry,
 3385                                              async_node, links);
 3386                                 csa->ccb_h.path->device->refcount--;
 3387                                 free(cur_entry, M_CAMXPT);
 3388                         } else {
 3389                                 cur_entry->event_enable = csa->event_enable;
 3390                         }
 3391                 } else {
 3392                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
 3393                                            M_NOWAIT);
 3394                         if (cur_entry == NULL) {
 3395                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 3396                                 break;
 3397                         }
 3398                         cur_entry->event_enable = csa->event_enable;
 3399                         cur_entry->callback_arg = csa->callback_arg;
 3400                         cur_entry->callback = csa->callback;
 3401                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
 3402                         csa->ccb_h.path->device->refcount++;
 3403                 }
 3404 
 3405                 /*
 3406                  * Need to decouple this operation via a taskqueue so that
 3407                  * the locking doesn't become a mess.
 3408                  */
 3409                 if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) {
 3410                         struct xpt_task *task;
 3411 
 3412                         task = malloc(sizeof(struct xpt_task), M_CAMXPT,
 3413                                       M_NOWAIT);
 3414                         if (task == NULL) {
 3415                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
 3416                                 break;
 3417                         }
 3418 
 3419                         TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task);
 3420                         task->data1 = cur_entry;
 3421                         task->data2 = added;
 3422                         taskqueue_enqueue(taskqueue_thread, &task->task);
 3423                 }
 3424 
 3425                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3426                 break;
 3427         }
 3428         case XPT_REL_SIMQ:
 3429         {
 3430                 struct ccb_relsim *crs;
 3431                 struct cam_ed *dev;
 3432 
 3433                 crs = &start_ccb->crs;
 3434                 dev = crs->ccb_h.path->device;
 3435                 if (dev == NULL) {
 3436 
 3437                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
 3438                         break;
 3439                 }
 3440 
 3441                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
 3442 
 3443                         if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
 3444                                 /* Don't ever go below one opening */
 3445                                 if (crs->openings > 0) {
 3446                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
 3447                                                             crs->openings);
 3448 
 3449                                         if (bootverbose) {
 3450                                                 xpt_print(crs->ccb_h.path,
 3451                                                     "tagged openings now %d\n",
 3452                                                     crs->openings);
 3453                                         }
 3454                                 }
 3455                         }
 3456                 }
 3457 
 3458                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
 3459 
 3460                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 3461 
 3462                                 /*
 3463                                  * Just extend the old timeout and decrement
 3464                                  * the freeze count so that a single timeout
 3465                                  * is sufficient for releasing the queue.
 3466                                  */
 3467                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3468                                 callout_stop(&dev->callout);
 3469                         } else {
 3470 
 3471                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3472                         }
 3473 
 3474                         callout_reset(&dev->callout,
 3475                             (crs->release_timeout * hz) / 1000,
 3476                             xpt_release_devq_timeout, dev);
 3477 
 3478                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
 3479 
 3480                 }
 3481 
 3482                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
 3483 
 3484                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
 3485                                 /*
 3486                                  * Decrement the freeze count so that a single
 3487                                  * completion is still sufficient to unfreeze
 3488                                  * the queue.
 3489                                  */
 3490                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3491                         } else {
 3492 
 3493                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
 3494                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3495                         }
 3496                 }
 3497 
 3498                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
 3499 
 3500                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 3501                          || (dev->ccbq.dev_active == 0)) {
 3502 
 3503                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
 3504                         } else {
 3505 
 3506                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
 3507                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 3508                         }
 3509                 }
 3510 
 3511                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
 3512 
 3513                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
 3514                                          /*run_queue*/TRUE);
 3515                 }
 3516                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
 3517                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3518                 break;
 3519         }
 3520         case XPT_SCAN_BUS:
 3521                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
 3522                 break;
 3523         case XPT_SCAN_LUN:
 3524                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
 3525                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
 3526                              start_ccb);
 3527                 break;
 3528         case XPT_DEBUG: {
 3529 #ifdef CAMDEBUG
 3530 #ifdef CAM_DEBUG_DELAY
 3531                 cam_debug_delay = CAM_DEBUG_DELAY;
 3532 #endif
 3533                 cam_dflags = start_ccb->cdbg.flags;
 3534                 if (cam_dpath != NULL) {
 3535                         xpt_free_path(cam_dpath);
 3536                         cam_dpath = NULL;
 3537                 }
 3538 
 3539                 if (cam_dflags != CAM_DEBUG_NONE) {
 3540                         if (xpt_create_path(&cam_dpath, xpt_periph,
 3541                                             start_ccb->ccb_h.path_id,
 3542                                             start_ccb->ccb_h.target_id,
 3543                                             start_ccb->ccb_h.target_lun) !=
 3544                                             CAM_REQ_CMP) {
 3545                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3546                                 cam_dflags = CAM_DEBUG_NONE;
 3547                         } else {
 3548                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3549                                 xpt_print(cam_dpath, "debugging flags now %x\n",
 3550                                     cam_dflags);
 3551                         }
 3552                 } else {
 3553                         cam_dpath = NULL;
 3554                         start_ccb->ccb_h.status = CAM_REQ_CMP;
 3555                 }
 3556 #else /* !CAMDEBUG */
 3557                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 3558 #endif /* CAMDEBUG */
 3559                 break;
 3560         }
 3561         case XPT_NOOP:
 3562                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
 3563                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
 3564                 start_ccb->ccb_h.status = CAM_REQ_CMP;
 3565                 break;
 3566         default:
 3567         case XPT_SDEV_TYPE:
 3568         case XPT_TERM_IO:
 3569         case XPT_ENG_INQ:
 3570                 /* XXX Implement */
 3571                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 3572                 break;
 3573         }
 3574 }
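
      /*
       * A hedged sketch of a typical xpt_action() caller, patterned after
       * xptsetasyncbusfunc() above; "path" is assumed to be a valid
       * struct cam_path held by the caller.
       */
      #if 0
      struct ccb_pathinq cpi;

      xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
      cpi.ccb_h.func_code = XPT_PATH_INQ;
      xpt_action((union ccb *)&cpi);
      if (cpi.ccb_h.status == CAM_REQ_CMP)
              printf("HBA vendor: %s\n", cpi.hba_vid);
      #endif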
 3575 
 3576 void
 3577 xpt_polled_action(union ccb *start_ccb)
 3578 {
 3579         u_int32_t timeout;
 3580         struct    cam_sim *sim;
 3581         struct    cam_devq *devq;
 3582         struct    cam_ed *dev;
 3583 
 3584 
 3585         timeout = start_ccb->ccb_h.timeout;
 3586         sim = start_ccb->ccb_h.path->bus->sim;
 3587         devq = sim->devq;
 3588         dev = start_ccb->ccb_h.path->device;
 3589 
 3590         mtx_assert(sim->mtx, MA_OWNED);
 3591 
 3592         /*
 3593          * Steal an opening so that no other queued requests
 3594          * can get it before us while we simulate interrupts.
 3595          */
 3596         dev->ccbq.devq_openings--;
 3597         dev->ccbq.dev_openings--;
 3598 
 3599         while (((devq != NULL && devq->send_openings <= 0) ||
 3600            dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
 3601                 DELAY(1000);
 3602                 (*(sim->sim_poll))(sim);
 3603                 camisr_runqueue(&sim->sim_doneq);
 3604         }
 3605 
 3606         dev->ccbq.devq_openings++;
 3607         dev->ccbq.dev_openings++;
 3608 
 3609         if (timeout != 0) {
 3610                 xpt_action(start_ccb);
 3611                 while (--timeout > 0) {
 3612                         (*(sim->sim_poll))(sim);
 3613                         camisr_runqueue(&sim->sim_doneq);
 3614                         if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
 3615                             != CAM_REQ_INPROG)
 3616                                 break;
 3617                         DELAY(1000);
 3618                 }
 3619                 if (timeout == 0) {
 3620                         /*
 3621                          * XXX Is it worth adding a sim_timeout entry
 3622                          * point so we can attempt recovery?  If
 3623                          * this is only used for dumps, I don't think
 3624                          * it is.
 3625                          */
 3626                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 3627                 }
 3628         } else {
 3629                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 3630         }
 3631 }
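
      /*
       * Note that the timeout above is consumed in roughly 1 ms steps
       * (DELAY(1000) per poll iteration), so ccb_h.timeout is treated as a
       * millisecond budget while interrupts are simulated.
       */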
 3632 
 3633 /*
 3634  * Schedule a peripheral driver to receive a ccb when its
 3635  * target device has space for more transactions.
 3636  */
 3637 void
 3638 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
 3639 {
 3640         struct cam_ed *device;
 3641         int runq;
 3642 
 3643         mtx_assert(perph->sim->mtx, MA_OWNED);
 3644 
 3645         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
 3646         device = perph->path->device;
 3647         if (periph_is_queued(perph)) {
 3648                 /* Simply reorder based on new priority */
 3649                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3650                           ("   change priority to %d\n", new_priority));
 3651                 if (new_priority < perph->pinfo.priority) {
 3652                         camq_change_priority(&device->drvq,
 3653                                              perph->pinfo.index,
 3654                                              new_priority);
 3655                 }
 3656                 runq = 0;
 3657         } else {
 3658                 /* New entry on the queue */
 3659                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3660                           ("   added periph to queue\n"));
 3661                 perph->pinfo.priority = new_priority;
 3662                 perph->pinfo.generation = ++device->drvq.generation;
 3663                 camq_insert(&device->drvq, &perph->pinfo);
 3664                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
 3665         }
 3666         if (runq != 0) {
 3667                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
 3668                           ("   calling xpt_run_dev_allocq\n"));
 3669                 xpt_run_dev_allocq(perph->path->bus);
 3670         }
 3671 }
 3672 
 3673 
 3674 /*
 3675  * Schedule a device to run on a given queue.
 3676  * If the device was inserted as a new entry on the queue,
 3677  * return 1 meaning the device queue should be run. If we
 3678  * were already queued, implying someone else has already
 3679  * started the queue, return 0 so the caller doesn't attempt
 3680  * to run the queue.
 3681  */
 3682 static int
 3683 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
 3684                  u_int32_t new_priority)
 3685 {
 3686         int retval;
 3687         u_int32_t old_priority;
 3688 
 3689         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
 3690 
 3691         old_priority = pinfo->priority;
 3692 
 3693         /*
 3694          * Are we already queued?
 3695          */
 3696         if (pinfo->index != CAM_UNQUEUED_INDEX) {
 3697                 /* Simply reorder based on new priority */
 3698                 if (new_priority < old_priority) {
 3699                         camq_change_priority(queue, pinfo->index,
 3700                                              new_priority);
 3701                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3702                                         ("changed priority to %d\n",
 3703                                          new_priority));
 3704                 }
 3705                 retval = 0;
 3706         } else {
 3707                 /* New entry on the queue */
 3708                 if (new_priority < old_priority)
 3709                         pinfo->priority = new_priority;
 3710 
 3711                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3712                                 ("Inserting onto queue\n"));
 3713                 pinfo->generation = ++queue->generation;
 3714                 camq_insert(queue, pinfo);
 3715                 retval = 1;
 3716         }
 3717         return (retval);
 3718 }
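
      /*
       * Note that priorities are ordered with lower numeric values being
       * more urgent, which is why both xpt_schedule() and
       * xpt_schedule_dev() only reorder an already-queued entry when
       * new_priority is less than the current priority.
       */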
 3719 
 3720 static void
 3721 xpt_run_dev_allocq(struct cam_eb *bus)
 3722 {
 3723         struct  cam_devq *devq;
 3724 
 3725         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
 3726         devq = bus->sim->devq;
 3727 
 3728         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3729                         ("   qfrozen_cnt == 0x%x, entries == %d, "
 3730                          "openings == %d, active == %d\n",
 3731                          devq->alloc_queue.qfrozen_cnt,
 3732                          devq->alloc_queue.entries,
 3733                          devq->alloc_openings,
 3734                          devq->alloc_active));
 3735 
 3736         devq->alloc_queue.qfrozen_cnt++;
 3737         while ((devq->alloc_queue.entries > 0)
 3738             && (devq->alloc_openings > 0)
 3739             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
 3740                 struct  cam_ed_qinfo *qinfo;
 3741                 struct  cam_ed *device;
 3742                 union   ccb *work_ccb;
 3743                 struct  cam_periph *drv;
 3744                 struct  camq *drvq;
 3745 
 3746                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
 3747                                                            CAMQ_HEAD);
 3748                 device = qinfo->device;
 3749 
 3750                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3751                                 ("running device %p\n", device));
 3752 
 3753                 drvq = &device->drvq;
 3754 
 3755 #ifdef CAMDEBUG
 3756                 if (drvq->entries <= 0) {
 3757                         panic("xpt_run_dev_allocq: "
 3758                               "Device on queue without any work to do");
 3759                 }
 3760 #endif
 3761                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
 3762                         devq->alloc_openings--;
 3763                         devq->alloc_active++;
 3764                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
 3765                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
 3766                                       drv->pinfo.priority);
 3767                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3768                                         ("calling periph start\n"));
 3769                         drv->periph_start(drv, work_ccb);
 3770                 } else {
 3771                         /*
 3772                          * Malloc failure in alloc_ccb
 3773                          */
 3774                         /*
 3775                          * XXX add us to a list to be run from free_ccb
 3776                          * if we don't have any ccbs active on this
 3777                          * device queue otherwise we may never get run
 3778                          * again.
 3779                          */
 3780                         break;
 3781                 }
 3782 
 3783                 if (drvq->entries > 0) {
 3784                         /* We have more work.  Attempt to reschedule */
 3785                         xpt_schedule_dev_allocq(bus, device);
 3786                 }
 3787         }
 3788         devq->alloc_queue.qfrozen_cnt--;
 3789 }
 3790 
 3791 static void
 3792 xpt_run_dev_sendq(struct cam_eb *bus)
 3793 {
 3794         struct  cam_devq *devq;
 3795 
 3796         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
 3797 
 3798         devq = bus->sim->devq;
 3799 
 3800         devq->send_queue.qfrozen_cnt++;
 3801         while ((devq->send_queue.entries > 0)
 3802             && (devq->send_openings > 0)) {
 3803                 struct  cam_ed_qinfo *qinfo;
 3804                 struct  cam_ed *device;
 3805                 union ccb *work_ccb;
 3806                 struct  cam_sim *sim;
 3807 
 3808                 if (devq->send_queue.qfrozen_cnt > 1) {
 3809                         break;
 3810                 }
 3811 
 3812                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
 3813                                                            CAMQ_HEAD);
 3814                 device = qinfo->device;
 3815 
 3816                 /*
 3817                  * If the device has been "frozen", don't attempt
 3818                  * to run it.
 3819                  */
 3820                 if (device->qfrozen_cnt > 0) {
 3821                         continue;
 3822                 }
 3823 
 3824                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
 3825                                 ("running device %p\n", device));
 3826 
 3827                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
 3828                 if (work_ccb == NULL) {
 3829                         printf("device on run queue with no ccbs???\n");
 3830                         continue;
 3831                 }
 3832 
 3833                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
 3834 
 3835                         mtx_lock(&xsoftc.xpt_lock);
 3836                         if (xsoftc.num_highpower <= 0) {
 3837                                 /*
 3838                                  * We got a high power command, but we
 3839                                  * don't have any available slots.  Freeze
 3840                                  * the device queue until we have a slot
 3841                                  * available.
 3842                                  */
 3843                                 device->qfrozen_cnt++;
 3844                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
 3845                                                    &work_ccb->ccb_h,
 3846                                                    xpt_links.stqe);
 3847 
 3848                                 mtx_unlock(&xsoftc.xpt_lock);
 3849                                 continue;
 3850                         } else {
 3851                                 /*
 3852                                  * Consume a high power slot while
 3853                                  * this ccb runs.
 3854                                  */
 3855                                 xsoftc.num_highpower--;
 3856                         }
 3857                         mtx_unlock(&xsoftc.xpt_lock);
 3858                 }
 3859                 devq->active_dev = device;
 3860                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
 3861 
 3862                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
 3863 
 3864                 devq->send_openings--;
 3865                 devq->send_active++;
 3866 
 3867                 if (device->ccbq.queue.entries > 0)
 3868                         xpt_schedule_dev_sendq(bus, device);
 3869 
 3870                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
 3871                         /*
 3872                          * The client wants to freeze the queue
 3873                          * after this CCB is sent.
 3874                          */
 3875                         device->qfrozen_cnt++;
 3876                 }
 3877 
 3878                 /* In Target mode, the peripheral driver knows best... */
 3879                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
 3880                         if ((device->inq_flags & SID_CmdQue) != 0
 3881                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
 3882                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
 3883                         else
 3884                                 /*
 3885                                  * Clear this in case of a retried CCB that
 3886                                  * failed due to a rejected tag.
 3887                                  */
 3888                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
 3889                 }
 3890 
 3891                 /*
 3892                  * Device queues can be shared among multiple sim instances
 3893                  * that reside on different busses.  Use the SIM in the queue
 3894                  * CCB's path, rather than the one in the bus that was passed
 3895                  * into this function.
 3896                  */
 3897                 sim = work_ccb->ccb_h.path->bus->sim;
 3898                 (*(sim->sim_action))(sim, work_ccb);
 3899 
 3900                 devq->active_dev = NULL;
 3901         }
 3902         devq->send_queue.qfrozen_cnt--;
 3903 }
 3904 
 3905 /*
 3906  * This function merges the request description from the slave ccb into the
 3907  * master ccb, while keeping important fields in the master ccb constant.
 3908  */
 3909 void
 3910 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
 3911 {
 3912 
 3913         /*
 3914          * Pull fields that are valid for peripheral drivers to set
 3915          * into the master CCB along with the CCB "payload".
 3916          */
 3917         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
 3918         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
 3919         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
 3920         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
 3921         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
 3922               sizeof(union ccb) - sizeof(struct ccb_hdr));
 3923 }
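
/*
 * Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): a caller holding a transport-allocated "master" CCB
 * can describe a request in a scratch "slave" CCB, merge it in, and
 * dispatch it; the master's path and priority survive the merge.
 */
static void
example_dispatch_merged(union ccb *master, union ccb *slave)
{

        xpt_merge_ccb(master, slave);
        xpt_action(master);
}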
 3924 
 3925 void
 3926 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
 3927 {
 3928 
 3929         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
 3930         ccb_h->pinfo.priority = priority;
 3931         ccb_h->path = path;
 3932         ccb_h->path_id = path->bus->path_id;
 3933         if (path->target)
 3934                 ccb_h->target_id = path->target->target_id;
 3935         else
 3936                 ccb_h->target_id = CAM_TARGET_WILDCARD;
 3937         if (path->device) {
 3938                 ccb_h->target_lun = path->device->lun_id;
 3939                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
 3940         } else {
 3941                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
 3942         }
 3943         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 3944         ccb_h->flags = 0;
 3945 }
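
/*
 * Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): an immediate CCB is initialized with xpt_setup_ccb()
 * and dispatched with xpt_action(), as the XPT_PATH_INQ calls later in
 * this file do.
 */
static u_int32_t
example_path_inq(struct cam_path *path, struct ccb_pathinq *cpi)
{

        xpt_setup_ccb(&cpi->ccb_h, path, /*priority*/1);
        cpi->ccb_h.func_code = XPT_PATH_INQ;
        xpt_action((union ccb *)cpi);
        return (cpi->ccb_h.status);
}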
 3946 
 3947 /* Path manipulation functions */
 3948 cam_status
 3949 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
 3950                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 3951 {
 3952         struct     cam_path *path;
 3953         cam_status status;
 3954 
 3955         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
 3956 
 3957         if (path == NULL) {
 3958                 status = CAM_RESRC_UNAVAIL;
 3959                 return(status);
 3960         }
 3961         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
 3962         if (status != CAM_REQ_CMP) {
 3963                 free(path, M_CAMXPT);
 3964                 path = NULL;
 3965         }
 3966         *new_path_ptr = path;
 3967         return (status);
 3968 }
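
/*
 * Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): create a path to one device, use it, and free it,
 * following the CAM_REQ_CMP/CAM_RESRC_UNAVAIL contract above.
 */
static cam_status
example_with_path(path_id_t bus_id, target_id_t tgt, lun_id_t lun)
{
        struct cam_path *path;
        cam_status status;

        status = xpt_create_path(&path, /*periph*/NULL, bus_id, tgt, lun);
        if (status != CAM_REQ_CMP)
                return (status);
        /* ... issue CCBs against 'path' here ... */
        xpt_free_path(path);
        return (CAM_REQ_CMP);
}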
 3969 
 3970 cam_status
 3971 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
 3972                          struct cam_periph *periph, path_id_t path_id,
 3973                          target_id_t target_id, lun_id_t lun_id)
 3974 {
 3975         struct     cam_path *path;
 3976         struct     cam_eb *bus = NULL;
 3977         cam_status status;
 3978         int        need_unlock = 0;
 3979 
 3980         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK);
 3981 
 3982         if (path_id != CAM_BUS_WILDCARD) {
 3983                 bus = xpt_find_bus(path_id);
 3984                 if (bus != NULL) {
 3985                         need_unlock = 1;
 3986                         CAM_SIM_LOCK(bus->sim);
 3987                 }
 3988         }
 3989         status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
 3990         if (need_unlock)
 3991                 CAM_SIM_UNLOCK(bus->sim);
 3992         if (status != CAM_REQ_CMP) {
 3993                 free(path, M_CAMXPT);
 3994                 path = NULL;
 3995         }
 3996         *new_path_ptr = path;
 3997         return (status);
 3998 }
 3999 
 4000 static cam_status
 4001 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
 4002                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
 4003 {
 4004         struct       cam_eb *bus;
 4005         struct       cam_et *target;
 4006         struct       cam_ed *device;
 4007         cam_status   status;
 4008 
 4009         status = CAM_REQ_CMP;   /* Completed without error */
 4010         target = NULL;          /* Wildcarded */
 4011         device = NULL;          /* Wildcarded */
 4012 
 4013         /*
 4014          * We will potentially modify the EDT, so block interrupts
 4015          * that may attempt to create cam paths.
 4016          */
 4017         bus = xpt_find_bus(path_id);
 4018         if (bus == NULL) {
 4019                 status = CAM_PATH_INVALID;
 4020         } else {
 4021                 target = xpt_find_target(bus, target_id);
 4022                 if (target == NULL) {
 4023                         /* Create one */
 4024                         struct cam_et *new_target;
 4025 
 4026                         new_target = xpt_alloc_target(bus, target_id);
 4027                         if (new_target == NULL) {
 4028                                 status = CAM_RESRC_UNAVAIL;
 4029                         } else {
 4030                                 target = new_target;
 4031                         }
 4032                 }
 4033                 if (target != NULL) {
 4034                         device = xpt_find_device(target, lun_id);
 4035                         if (device == NULL) {
 4036                                 /* Create one */
 4037                                 struct cam_ed *new_device;
 4038 
 4039                                 new_device = xpt_alloc_device(bus,
 4040                                                               target,
 4041                                                               lun_id);
 4042                                 if (new_device == NULL) {
 4043                                         status = CAM_RESRC_UNAVAIL;
 4044                                 } else {
 4045                                         device = new_device;
 4046                                 }
 4047                         }
 4048                 }
 4049         }
 4050 
 4051         /*
 4052          * Only touch the user's data if we are successful.
 4053          */
 4054         if (status == CAM_REQ_CMP) {
 4055                 new_path->periph = perph;
 4056                 new_path->bus = bus;
 4057                 new_path->target = target;
 4058                 new_path->device = device;
 4059                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
 4060         } else {
 4061                 if (device != NULL)
 4062                         xpt_release_device(bus, target, device);
 4063                 if (target != NULL)
 4064                         xpt_release_target(bus, target);
 4065                 if (bus != NULL)
 4066                         xpt_release_bus(bus);
 4067         }
 4068         return (status);
 4069 }
 4070 
 4071 static void
 4072 xpt_release_path(struct cam_path *path)
 4073 {
 4074         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
 4075         if (path->device != NULL) {
 4076                 xpt_release_device(path->bus, path->target, path->device);
 4077                 path->device = NULL;
 4078         }
 4079         if (path->target != NULL) {
 4080                 xpt_release_target(path->bus, path->target);
 4081                 path->target = NULL;
 4082         }
 4083         if (path->bus != NULL) {
 4084                 xpt_release_bus(path->bus);
 4085                 path->bus = NULL;
 4086         }
 4087 }
 4088 
 4089 void
 4090 xpt_free_path(struct cam_path *path)
 4091 {
 4092 
 4093         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
 4094         xpt_release_path(path);
 4095         free(path, M_CAMXPT);
 4096 }
 4097 
 4098 
 4099 /*
 4100  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 4101  * in path1, 2 for match with wildcards in path2.
 4102  */
 4103 int
 4104 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
 4105 {
 4106         int retval = 0;
 4107 
 4108         if (path1->bus != path2->bus) {
 4109                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
 4110                         retval = 1;
 4111                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
 4112                         retval = 2;
 4113                 else
 4114                         return (-1);
 4115         }
 4116         if (path1->target != path2->target) {
 4117                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
 4118                         if (retval == 0)
 4119                                 retval = 1;
 4120                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
 4121                         retval = 2;
 4122                 else
 4123                         return (-1);
 4124         }
 4125         if (path1->device != path2->device) {
 4126                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
 4127                         if (retval == 0)
 4128                                 retval = 1;
 4129                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
 4130                         retval = 2;
 4131                 else
 4132                         return (-1);
 4133         }
 4134         return (retval);
 4135 }
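
/*
 * Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): callers that only care whether two paths overlap at
 * all can treat any non-negative return as a match, exact or wildcarded.
 */
static int
example_paths_overlap(struct cam_path *p1, struct cam_path *p2)
{

        return (xpt_path_comp(p1, p2) >= 0);
}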
 4136 
 4137 void
 4138 xpt_print_path(struct cam_path *path)
 4139 {
 4140 
 4141         if (path == NULL)
 4142                 printf("(nopath): ");
 4143         else {
 4144                 if (path->periph != NULL)
 4145                         printf("(%s%d:", path->periph->periph_name,
 4146                                path->periph->unit_number);
 4147                 else
 4148                         printf("(noperiph:");
 4149 
 4150                 if (path->bus != NULL)
 4151                         printf("%s%d:%d:", path->bus->sim->sim_name,
 4152                                path->bus->sim->unit_number,
 4153                                path->bus->sim->bus_id);
 4154                 else
 4155                         printf("nobus:");
 4156 
 4157                 if (path->target != NULL)
 4158                         printf("%d:", path->target->target_id);
 4159                 else
 4160                         printf("X:");
 4161 
 4162                 if (path->device != NULL)
 4163                         printf("%d): ", path->device->lun_id);
 4164                 else
 4165                         printf("X): ");
 4166         }
 4167 }
 4168 
 4169 void
 4170 xpt_print(struct cam_path *path, const char *fmt, ...)
 4171 {
 4172         va_list ap;
 4173         xpt_print_path(path);
 4174         va_start(ap, fmt);
 4175         vprintf(fmt, ap);
 4176         va_end(ap);
 4177 }
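
/*
 * Usage note (not part of the original source): xpt_print() prefixes a
 * message with the "(periph:sim:bus:target:lun): " string built above,
 * so a diagnostic such as
 *
 *      xpt_print(path, "retrying command, %d tries remain\n", tries);
 *
 * identifies the device it refers to.
 */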
 4178 
 4179 int
 4180 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
 4181 {
 4182         struct sbuf sb;
 4183 
 4184 #ifdef INVARIANTS
 4185         if (path != NULL && path->bus != NULL)
 4186                 mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4187 #endif
 4188 
 4189         sbuf_new(&sb, str, str_len, 0);
 4190 
 4191         if (path == NULL)
 4192                 sbuf_printf(&sb, "(nopath): ");
 4193         else {
 4194                 if (path->periph != NULL)
 4195                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
 4196                                     path->periph->unit_number);
 4197                 else
 4198                         sbuf_printf(&sb, "(noperiph:");
 4199 
 4200                 if (path->bus != NULL)
 4201                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
 4202                                     path->bus->sim->unit_number,
 4203                                     path->bus->sim->bus_id);
 4204                 else
 4205                         sbuf_printf(&sb, "nobus:");
 4206 
 4207                 if (path->target != NULL)
 4208                         sbuf_printf(&sb, "%d:", path->target->target_id);
 4209                 else
 4210                         sbuf_printf(&sb, "X:");
 4211 
 4212                 if (path->device != NULL)
 4213                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
 4214                 else
 4215                         sbuf_printf(&sb, "X): ");
 4216         }
 4217         sbuf_finish(&sb);
 4218 
 4219         return(sbuf_len(&sb));
 4220 }
 4221 
 4222 path_id_t
 4223 xpt_path_path_id(struct cam_path *path)
 4224 {
 4225         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4226 
 4227         return(path->bus->path_id);
 4228 }
 4229 
 4230 target_id_t
 4231 xpt_path_target_id(struct cam_path *path)
 4232 {
 4233         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4234 
 4235         if (path->target != NULL)
 4236                 return (path->target->target_id);
 4237         else
 4238                 return (CAM_TARGET_WILDCARD);
 4239 }
 4240 
 4241 lun_id_t
 4242 xpt_path_lun_id(struct cam_path *path)
 4243 {
 4244         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4245 
 4246         if (path->device != NULL)
 4247                 return (path->device->lun_id);
 4248         else
 4249                 return (CAM_LUN_WILDCARD);
 4250 }
 4251 
 4252 struct cam_sim *
 4253 xpt_path_sim(struct cam_path *path)
 4254 {
 4255 
 4256         return (path->bus->sim);
 4257 }
 4258 
 4259 struct cam_periph*
 4260 xpt_path_periph(struct cam_path *path)
 4261 {
 4262         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4263 
 4264         return (path->periph);
 4265 }
 4266 
 4267 /*
 4268  * Release a CAM control block for the caller.  Remit the cost of the structure
 4269  * to the device referenced by the path.  If this device had no 'credits'
 4270  * and peripheral drivers have registered async callbacks for this
 4271  * notification, call them now.
 4272  */
 4273 void
 4274 xpt_release_ccb(union ccb *free_ccb)
 4275 {
 4276         struct   cam_path *path;
 4277         struct   cam_ed *device;
 4278         struct   cam_eb *bus;
 4279         struct   cam_sim *sim;
 4280 
 4281         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
 4282         path = free_ccb->ccb_h.path;
 4283         device = path->device;
 4284         bus = path->bus;
 4285         sim = bus->sim;
 4286 
 4287         mtx_assert(sim->mtx, MA_OWNED);
 4288 
 4289         cam_ccbq_release_opening(&device->ccbq);
 4290         if (sim->ccb_count > sim->max_ccbs) {
 4291                 xpt_free_ccb(free_ccb);
 4292                 sim->ccb_count--;
 4293         } else {
 4294                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
 4295                     xpt_links.sle);
 4296         }
 4297         if (sim->devq == NULL) {
 4298                 return;
 4299         }
 4300         sim->devq->alloc_openings++;
 4301         sim->devq->alloc_active--;
 4302         /* XXX Turn this into an inline function - xpt_run_device?? */
 4303         if ((device_is_alloc_queued(device) == 0)
 4304          && (device->drvq.entries > 0)) {
 4305                 xpt_schedule_dev_allocq(bus, device);
 4306         }
 4307         if (dev_allocq_is_runnable(sim->devq))
 4308                 xpt_run_dev_allocq(bus);
 4309 }
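
/*
 * Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): a peripheral completion routine hands its CCB back
 * with xpt_release_ccb() so the opening is credited back to the device
 * and any queued work is rescheduled.
 */
static void
example_periph_done(struct cam_periph *periph, union ccb *done_ccb)
{

        /* ... interpret done_ccb->ccb_h.status here ... */
        xpt_release_ccb(done_ccb);
}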
 4310 
 4311 /* Functions accessed by SIM drivers */
 4312 
 4313 /*
 4314  * A sim structure, listing the SIM entry points and instance
 4315  * identification info, is passed to xpt_bus_register to hook the SIM
 4316  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 4317  * for this new bus, places it in the list of busses, and assigns
 4318  * it a path_id.  The path_id may be influenced by "hard wiring"
 4319  * information specified by the user.  Once interrupt services are
 4320  * available, the bus will be probed.
 4321  */
 4322 int32_t
 4323 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
 4324 {
 4325         struct cam_eb *new_bus;
 4326         struct cam_eb *old_bus;
 4327         struct ccb_pathinq cpi;
 4328 
 4329         mtx_assert(sim->mtx, MA_OWNED);
 4330 
 4331         sim->bus_id = bus;
 4332         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
 4333                                           M_CAMXPT, M_NOWAIT);
 4334         if (new_bus == NULL) {
 4335                 /* Couldn't satisfy request */
 4336                 return (CAM_RESRC_UNAVAIL);
 4337         }
 4338 
 4339         if (strcmp(sim->sim_name, "xpt") != 0) {
 4340 
 4341                 sim->path_id =
 4342                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
 4343         }
 4344 
 4345         TAILQ_INIT(&new_bus->et_entries);
 4346         new_bus->path_id = sim->path_id;
 4347         cam_sim_hold(sim);
 4348         new_bus->sim = sim;
 4349         timevalclear(&new_bus->last_reset);
 4350         new_bus->flags = 0;
 4351         new_bus->refcount = 1;  /* Held until a bus_deregister event */
 4352         new_bus->generation = 0;
 4353         mtx_lock(&xsoftc.xpt_topo_lock);
 4354         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4355         while (old_bus != NULL
 4356             && old_bus->path_id < new_bus->path_id)
 4357                 old_bus = TAILQ_NEXT(old_bus, links);
 4358         if (old_bus != NULL)
 4359                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
 4360         else
 4361                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
 4362         xsoftc.bus_generation++;
 4363         mtx_unlock(&xsoftc.xpt_topo_lock);
 4364 
 4365         /* Notify interested parties */
 4366         if (sim->path_id != CAM_XPT_PATH_ID) {
 4367                 struct cam_path path;
 4368 
 4369                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
 4370                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4371                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 4372                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 4373                 xpt_action((union ccb *)&cpi);
 4374                 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
 4375                 xpt_release_path(&path);
 4376         }
 4377         return (CAM_SUCCESS);
 4378 }
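
/*
 * Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): a SIM driver's attach routine registers its bus
 * while holding the SIM lock.  'sim' and 'dev' are assumed to have been
 * created by the driver beforehand.
 */
static int
example_attach_bus(struct cam_sim *sim, device_t dev)
{
        int error;

        error = 0;
        CAM_SIM_LOCK(sim);
        if (xpt_bus_register(sim, dev, /*bus*/0) != CAM_SUCCESS)
                error = ENXIO;
        CAM_SIM_UNLOCK(sim);
        return (error);
}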
 4379 
 4380 int32_t
 4381 xpt_bus_deregister(path_id_t pathid)
 4382 {
 4383         struct cam_path bus_path;
 4384         cam_status status;
 4385 
 4386         status = xpt_compile_path(&bus_path, NULL, pathid,
 4387                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 4388         if (status != CAM_REQ_CMP)
 4389                 return (status);
 4390 
 4391         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
 4392         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
 4393 
 4394         /* Release the reference count held while registered. */
 4395         xpt_release_bus(bus_path.bus);
 4396         xpt_release_path(&bus_path);
 4397 
 4398         return (CAM_REQ_CMP);
 4399 }
 4400 
 4401 static path_id_t
 4402 xptnextfreepathid(void)
 4403 {
 4404         struct cam_eb *bus;
 4405         path_id_t pathid;
 4406         const char *strval;
 4407 
 4408         pathid = 0;
 4409         mtx_lock(&xsoftc.xpt_topo_lock);
 4410         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 4411 retry:
 4412         /* Find an unoccupied pathid */
 4413         while (bus != NULL && bus->path_id <= pathid) {
 4414                 if (bus->path_id == pathid)
 4415                         pathid++;
 4416                 bus = TAILQ_NEXT(bus, links);
 4417         }
 4418         mtx_unlock(&xsoftc.xpt_topo_lock);
 4419 
 4420         /*
 4421          * Ensure that this pathid is not reserved for
 4422          * a bus that may be registered in the future.
 4423          */
 4424         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
 4425                 ++pathid;
 4426                 /* Start the search over */
 4427                 mtx_lock(&xsoftc.xpt_topo_lock);
 4428                 goto retry;
 4429         }
 4430         return (pathid);
 4431 }
 4432 
 4433 static path_id_t
 4434 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
 4435 {
 4436         path_id_t pathid;
 4437         int i, dunit, val;
 4438         char buf[32];
 4439         const char *dname;
 4440 
 4441         pathid = CAM_XPT_PATH_ID;
 4442         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
 4443         i = 0;
 4444         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
 4445                 if (strcmp(dname, "scbus")) {
 4446                         /* Avoid a bit of foot shooting. */
 4447                         continue;
 4448                 }
 4449                 if (dunit < 0)          /* unwired?! */
 4450                         continue;
 4451                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
 4452                         if (sim_bus == val) {
 4453                                 pathid = dunit;
 4454                                 break;
 4455                         }
 4456                 } else if (sim_bus == 0) {
 4457                         /* Unspecified matches bus 0 */
 4458                         pathid = dunit;
 4459                         break;
 4460                 } else {
 4461                         printf("Ambiguous scbus configuration for %s%d "
 4462                                "bus %d, cannot wire down.  The kernel "
 4463                                "config entry for scbus%d should "
 4464                                "specify a controller bus.\n"
 4465                                "Scbus will be assigned dynamically.\n",
 4466                                sim_name, sim_unit, sim_bus, dunit);
 4467                         break;
 4468                 }
 4469         }
 4470 
 4471         if (pathid == CAM_XPT_PATH_ID)
 4472                 pathid = xptnextfreepathid();
 4473         return (pathid);
 4474 }
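
/*
 * Wiring note (not part of the original source): the resource_*_value()
 * lookups above consult kernel hints, so a bus can be wired to a fixed
 * path id with entries such as
 *
 *      hint.scbus.0.at="ahc0"
 *      hint.scbus.0.bus="0"
 *
 * Busses left unwired fall through to xptnextfreepathid().
 */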
 4475 
 4476 void
 4477 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
 4478 {
 4479         struct cam_eb *bus;
 4480         struct cam_et *target, *next_target;
 4481         struct cam_ed *device, *next_device;
 4482 
 4483         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4484 
 4485         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
 4486 
 4487         /*
 4488          * Most async events come from a CAM interrupt context.  In
 4489          * a few cases, the error recovery code at the peripheral layer,
 4490          * which may run from our SWI or a process context, may signal
 4491          * deferred events with a call to xpt_async.
 4492          */
 4493 
 4494         bus = path->bus;
 4495 
 4496         if (async_code == AC_BUS_RESET) {
 4497                 /* Update our notion of when the last reset occurred */
 4498                 microtime(&bus->last_reset);
 4499         }
 4500 
 4501         for (target = TAILQ_FIRST(&bus->et_entries);
 4502              target != NULL;
 4503              target = next_target) {
 4504 
 4505                 next_target = TAILQ_NEXT(target, links);
 4506 
 4507                 if (path->target != target
 4508                  && path->target->target_id != CAM_TARGET_WILDCARD
 4509                  && target->target_id != CAM_TARGET_WILDCARD)
 4510                         continue;
 4511 
 4512                 if (async_code == AC_SENT_BDR) {
 4513                         /* Update our notion of when the last reset occurred */
 4514                         microtime(&path->target->last_reset);
 4515                 }
 4516 
 4517                 for (device = TAILQ_FIRST(&target->ed_entries);
 4518                      device != NULL;
 4519                      device = next_device) {
 4520 
 4521                         next_device = TAILQ_NEXT(device, links);
 4522 
 4523                         if (path->device != device
 4524                          && path->device->lun_id != CAM_LUN_WILDCARD
 4525                          && device->lun_id != CAM_LUN_WILDCARD)
 4526                                 continue;
 4527 
 4528                         xpt_dev_async(async_code, bus, target,
 4529                                       device, async_arg);
 4530 
 4531                         xpt_async_bcast(&device->asyncs, async_code,
 4532                                         path, async_arg);
 4533                 }
 4534         }
 4535 
 4536         /*
 4537          * If this wasn't a fully wildcarded async, tell all
 4538          * clients that want all async events.
 4539          */
 4540         if (bus != xpt_periph->path->bus)
 4541                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
 4542                                 path, async_arg);
 4543 }
 4544 
 4545 static void
 4546 xpt_async_bcast(struct async_list *async_head,
 4547                 u_int32_t async_code,
 4548                 struct cam_path *path, void *async_arg)
 4549 {
 4550         struct async_node *cur_entry;
 4551 
 4552         cur_entry = SLIST_FIRST(async_head);
 4553         while (cur_entry != NULL) {
 4554                 struct async_node *next_entry;
 4555                 /*
 4556                  * Grab the next list entry before we call the current
 4557                  * entry's callback.  This is because the callback function
 4558                  * can delete its async callback entry.
 4559                  */
 4560                 next_entry = SLIST_NEXT(cur_entry, links);
 4561                 if ((cur_entry->event_enable & async_code) != 0)
 4562                         cur_entry->callback(cur_entry->callback_arg,
 4563                                             async_code, path,
 4564                                             async_arg);
 4565                 cur_entry = next_entry;
 4566         }
 4567 }
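
/*
 * Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): the loop above matches the SLIST_FOREACH_SAFE()
 * idiom from <sys/queue.h>, which likewise samples the next pointer
 * before a callback can unlink cur_entry.
 */
static void
example_async_bcast_safe(struct async_list *async_head, u_int32_t async_code,
                         struct cam_path *path, void *async_arg)
{
        struct async_node *cur_entry, *next_entry;

        SLIST_FOREACH_SAFE(cur_entry, async_head, links, next_entry) {
                if ((cur_entry->event_enable & async_code) != 0)
                        cur_entry->callback(cur_entry->callback_arg,
                                            async_code, path, async_arg);
        }
}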
 4568 
 4569 /*
 4570  * Handle any per-device event notifications that require action by the XPT.
 4571  */
 4572 static void
 4573 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
 4574               struct cam_ed *device, void *async_arg)
 4575 {
 4576         cam_status status;
 4577         struct cam_path newpath;
 4578 
 4579         /*
 4580          * We only need to handle events for real devices.
 4581          */
 4582         if (target->target_id == CAM_TARGET_WILDCARD
 4583          || device->lun_id == CAM_LUN_WILDCARD)
 4584                 return;
 4585 
 4586         /*
 4587          * We need our own path with wildcards expanded to
 4588          * handle certain types of events.
 4589          */
 4590         if ((async_code == AC_SENT_BDR)
 4591          || (async_code == AC_BUS_RESET)
 4592          || (async_code == AC_INQ_CHANGED))
 4593                 status = xpt_compile_path(&newpath, NULL,
 4594                                           bus->path_id,
 4595                                           target->target_id,
 4596                                           device->lun_id);
 4597         else
 4598                 status = CAM_REQ_CMP_ERR;
 4599 
 4600         if (status == CAM_REQ_CMP) {
 4601 
 4602                 /*
 4603                  * Allow transfer negotiation to occur in a
 4604                  * tag free environment.
 4605                  */
 4606                 if (async_code == AC_SENT_BDR
 4607                  || async_code == AC_BUS_RESET)
 4608                         xpt_toggle_tags(&newpath);
 4609 
 4610                 if (async_code == AC_INQ_CHANGED) {
 4611                         /*
 4612                          * We've sent a start unit command, or
 4613                          * something similar to a device that
 4614                          * may have caused its inquiry data to
 4615                          * change. So we re-scan the device to
 4616                          * refresh the inquiry data for it.
 4617                          */
 4618                         xpt_scan_lun(newpath.periph, &newpath,
 4619                                      CAM_EXPECT_INQ_CHANGE, NULL);
 4620                 }
 4621                 xpt_release_path(&newpath);
 4622         } else if (async_code == AC_LOST_DEVICE) {
 4623                 device->flags |= CAM_DEV_UNCONFIGURED;
 4624         } else if (async_code == AC_TRANSFER_NEG) {
 4625                 struct ccb_trans_settings *settings;
 4626 
 4627                 settings = (struct ccb_trans_settings *)async_arg;
 4628                 xpt_set_transfer_settings(settings, device,
 4629                                           /*async_update*/TRUE);
 4630         }
 4631 }
 4632 
 4633 u_int32_t
 4634 xpt_freeze_devq(struct cam_path *path, u_int count)
 4635 {
 4636         struct ccb_hdr *ccbh;
 4637 
 4638         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4639 
 4640         path->device->qfrozen_cnt += count;
 4641 
 4642         /*
 4643          * Mark the last CCB in the queue as needing
 4644          * to be requeued if the driver hasn't
 4645          * changed its state yet.  This fixes a race
 4646          * where a ccb is just about to be queued to
 4647          * a controller driver when its interrupt routine
 4648          * freezes the queue.  To completely close the
 4649          * hole, controller drivers must check to see
 4650          * if a ccb's status is still CAM_REQ_INPROG
 4651          * just before they queue
 4652          * the CCB.  See ahc_action/ahc_freeze_devq for
 4653          * an example.
 4654          */
 4655         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
 4656         if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4657                 ccbh->status = CAM_REQUEUE_REQ;
 4658         return (path->device->qfrozen_cnt);
 4659 }
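
/*
 * Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): a peripheral completion routine pairs a SIM-induced
 * freeze (reported via CAM_DEV_QFRZN) with a matching release once its
 * recovery work is done.
 */
static void
example_thaw_after_recovery(union ccb *done_ccb)
{

        if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
                xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
                                 /*run_queue*/TRUE);
}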
 4660 
 4661 u_int32_t
 4662 xpt_freeze_simq(struct cam_sim *sim, u_int count)
 4663 {
 4664         mtx_assert(sim->mtx, MA_OWNED);
 4665 
 4666         sim->devq->send_queue.qfrozen_cnt += count;
 4667         if (sim->devq->active_dev != NULL) {
 4668                 struct ccb_hdr *ccbh;
 4669 
 4670                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
 4671                                   ccb_hdr_tailq);
 4672                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
 4673                         ccbh->status = CAM_REQUEUE_REQ;
 4674         }
 4675         return (sim->devq->send_queue.qfrozen_cnt);
 4676 }
 4677 
 4678 static void
 4679 xpt_release_devq_timeout(void *arg)
 4680 {
 4681         struct cam_ed *device;
 4682 
 4683         device = (struct cam_ed *)arg;
 4684 
 4685         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
 4686 }
 4687 
 4688 void
 4689 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
 4690 {
 4691         mtx_assert(path->bus->sim->mtx, MA_OWNED);
 4692 
 4693         xpt_release_devq_device(path->device, count, run_queue);
 4694 }
 4695 
 4696 static void
 4697 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
 4698 {
 4699         int     rundevq;
 4700 
 4701         rundevq = 0;
 4702         if (dev->qfrozen_cnt > 0) {
 4703 
 4704                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
 4705                 dev->qfrozen_cnt -= count;
 4706                 if (dev->qfrozen_cnt == 0) {
 4707 
 4708                         /*
 4709                          * No longer need to wait for a successful
 4710                          * command completion.
 4711                          */
 4712                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
 4713 
 4714                         /*
 4715                          * Remove any timeouts that might be scheduled
 4716                          * to release this queue.
 4717                          */
 4718                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
 4719                                 callout_stop(&dev->callout);
 4720                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
 4721                         }
 4722 
 4723                         /*
 4724                          * Now that we are unfrozen, schedule the
 4725                          * device so any pending transactions are
 4726                          * run.
 4727                          */
 4728                         if ((dev->ccbq.queue.entries > 0)
 4729                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
 4730                          && (run_queue != 0)) {
 4731                                 rundevq = 1;
 4732                         }
 4733                 }
 4734         }
 4735         if (rundevq != 0)
 4736                 xpt_run_dev_sendq(dev->target->bus);
 4737 }
 4738 
 4739 void
 4740 xpt_release_simq(struct cam_sim *sim, int run_queue)
 4741 {
 4742         struct  camq *sendq;
 4743 
 4744         mtx_assert(sim->mtx, MA_OWNED);
 4745 
 4746         sendq = &(sim->devq->send_queue);
 4747         if (sendq->qfrozen_cnt > 0) {
 4748 
 4749                 sendq->qfrozen_cnt--;
 4750                 if (sendq->qfrozen_cnt == 0) {
 4751                         struct cam_eb *bus;
 4752 
 4753                         /*
 4754                          * If there is a timeout scheduled to release this
 4755                          * sim queue, remove it.  The queue frozen count is
 4756                          * already at 0.
 4757                          */
 4758                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
 4759                                 callout_stop(&sim->callout);
 4760                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
 4761                         }
 4762                         bus = xpt_find_bus(sim->path_id);
 4763 
 4764                         if (run_queue) {
 4765                                 /*
 4766                                  * Now that we are unfrozen, run the send queue.
 4767                                  */
 4768                                 xpt_run_dev_sendq(bus);
 4769                         }
 4770                         xpt_release_bus(bus);
 4771                 }
 4772         }
 4773 }
 4774 
 4775 /*
 4776  * XXX Appears to be unused.
 4777  */
 4778 static void
 4779 xpt_release_simq_timeout(void *arg)
 4780 {
 4781         struct cam_sim *sim;
 4782 
 4783         sim = (struct cam_sim *)arg;
 4784         xpt_release_simq(sim, /* run_queue */ TRUE);
 4785 }
 4786 
 4787 void
 4788 xpt_done(union ccb *done_ccb)
 4789 {
 4790         struct cam_sim *sim;
 4791 
 4792         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
 4793         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 4794                 /*
 4795                  * Queue up the request for handling by our SWI handler;
 4796                  * this path covers the "non-immediate" types of ccbs.
 4797                  */
 4798                 sim = done_ccb->ccb_h.path->bus->sim;
 4799                 switch (done_ccb->ccb_h.path->periph->type) {
 4800                 case CAM_PERIPH_BIO:
 4801                         TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
 4802                                           sim_links.tqe);
 4803                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
 4804                         if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
 4805                                 mtx_lock(&cam_simq_lock);
 4806                                 TAILQ_INSERT_TAIL(&cam_simq, sim,
 4807                                                   links);
 4808                                 sim->flags |= CAM_SIM_ON_DONEQ;
 4809                                 mtx_unlock(&cam_simq_lock);
 4810                         }
 4811                         if ((done_ccb->ccb_h.path->periph->flags &
 4812                             CAM_PERIPH_POLLED) == 0)
 4813                                 swi_sched(cambio_ih, 0);
 4814                         break;
 4815                 default:
 4816                         panic("unknown periph type %d",
 4817                             done_ccb->ccb_h.path->periph->type);
 4818                 }
 4819         }
 4820 }
 4821 
 4822 union ccb *
 4823 xpt_alloc_ccb(void)
 4824 {
 4825         union ccb *new_ccb;
 4826 
 4827         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK);
 4828         return (new_ccb);
 4829 }
 4830 
 4831 union ccb *
 4832 xpt_alloc_ccb_nowait(void)
 4833 {
 4834         union ccb *new_ccb;
 4835 
 4836         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT);
 4837         return (new_ccb);
 4838 }
 4839 
 4840 void
 4841 xpt_free_ccb(union ccb *free_ccb)
 4842 {
 4843         free(free_ccb, M_CAMXPT);
 4844 }
 4845 
 4846 
 4847 
 4848 /* Private XPT functions */
 4849 
 4850 /*
 4851  * Get a CAM control block for the caller. Charge the structure to the device
 4852  * referenced by the path.  If this device has no 'credits' then the
 4853  * device already has the maximum number of outstanding operations under way
 4854  * and we return NULL. If we don't have sufficient resources to allocate more
 4855  * ccbs, we also return NULL.
 4856  */
 4857 static union ccb *
 4858 xpt_get_ccb(struct cam_ed *device)
 4859 {
 4860         union ccb *new_ccb;
 4861         struct cam_sim *sim;
 4862 
 4863         sim = device->sim;
 4864         if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
 4865                 new_ccb = xpt_alloc_ccb_nowait();
 4866                 if (new_ccb == NULL) {
 4867                         return (NULL);
 4868                 }
 4869                 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
 4870                         callout_handle_init(&new_ccb->ccb_h.timeout_ch);
 4871                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
 4872                                   xpt_links.sle);
 4873                 sim->ccb_count++;
 4874         }
 4875         cam_ccbq_take_opening(&device->ccbq);
 4876         SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
 4877         return (new_ccb);
 4878 }
 4879 
 4880 static void
 4881 xpt_release_bus(struct cam_eb *bus)
 4882 {
 4883 
 4884         if ((--bus->refcount == 0)
 4885          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
 4886                 mtx_lock(&xsoftc.xpt_topo_lock);
 4887                 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
 4888                 xsoftc.bus_generation++;
 4889                 mtx_unlock(&xsoftc.xpt_topo_lock);
 4890                 cam_sim_release(bus->sim);
 4891                 free(bus, M_CAMXPT);
 4892         }
 4893 }
 4894 
 4895 static struct cam_et *
 4896 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
 4897 {
 4898         struct cam_et *target;
 4899 
 4900         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
 4901         if (target != NULL) {
 4902                 struct cam_et *cur_target;
 4903 
 4904                 TAILQ_INIT(&target->ed_entries);
 4905                 target->bus = bus;
 4906                 target->target_id = target_id;
 4907                 target->refcount = 1;
 4908                 target->generation = 0;
 4909                 timevalclear(&target->last_reset);
 4910                 /*
 4911                  * Hold a reference to our parent bus so it
 4912                  * will not go away before we do.
 4913                  */
 4914                 bus->refcount++;
 4915 
 4916                 /* Insertion sort into our bus's target list */
 4917                 cur_target = TAILQ_FIRST(&bus->et_entries);
 4918                 while (cur_target != NULL && cur_target->target_id < target_id)
 4919                         cur_target = TAILQ_NEXT(cur_target, links);
 4920 
 4921                 if (cur_target != NULL) {
 4922                         TAILQ_INSERT_BEFORE(cur_target, target, links);
 4923                 } else {
 4924                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
 4925                 }
 4926                 bus->generation++;
 4927         }
 4928         return (target);
 4929 }
 4930 
 4931 static void
 4932 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
 4933 {
 4934 
 4935         if ((--target->refcount == 0)
 4936          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
 4937                 TAILQ_REMOVE(&bus->et_entries, target, links);
 4938                 bus->generation++;
 4939                 free(target, M_CAMXPT);
 4940                 xpt_release_bus(bus);
 4941         }
 4942 }
 4943 
 4944 static struct cam_ed *
 4945 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
 4946 {
 4947         struct     cam_path path;
 4948         struct     cam_ed *device;
 4949         struct     cam_devq *devq;
 4950         cam_status status;
 4951 
 4952         /* Make space for us in the device queue on our bus */
 4953         devq = bus->sim->devq;
 4954         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
 4955 
 4956         if (status != CAM_REQ_CMP) {
 4957                 device = NULL;
 4958         } else {
 4959                 device = (struct cam_ed *)malloc(sizeof(*device),
 4960                                                  M_CAMXPT, M_NOWAIT);
 4961         }
 4962 
 4963         if (device != NULL) {
 4964                 struct cam_ed *cur_device;
 4965 
 4966                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
 4967                 device->alloc_ccb_entry.device = device;
 4968                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
 4969                 device->send_ccb_entry.device = device;
 4970                 device->target = target;
 4971                 device->lun_id = lun_id;
 4972                 device->sim = bus->sim;
 4973                 /* Initialize our queues */
 4974                 if (camq_init(&device->drvq, 0) != 0) {
 4975                         free(device, M_CAMXPT);
 4976                         return (NULL);
 4977                 }
 4978                 if (cam_ccbq_init(&device->ccbq,
 4979                                   bus->sim->max_dev_openings) != 0) {
 4980                         camq_fini(&device->drvq);
 4981                         free(device, M_CAMXPT);
 4982                         return (NULL);
 4983                 }
 4984                 SLIST_INIT(&device->asyncs);
 4985                 SLIST_INIT(&device->periphs);
 4986                 device->generation = 0;
 4987                 device->owner = NULL;
 4988                 /*
 4989                  * Take the default quirk entry until we have inquiry
 4990                  * data and can determine a better quirk to use.
 4991                  */
 4992                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
 4993                 bzero(&device->inq_data, sizeof(device->inq_data));
 4994                 device->inq_flags = 0;
 4995                 device->queue_flags = 0;
 4996                 device->serial_num = NULL;
 4997                 device->serial_num_len = 0;
 4998                 device->qfrozen_cnt = 0;
 4999                 device->flags = CAM_DEV_UNCONFIGURED;
 5000                 device->tag_delay_count = 0;
 5001                 device->tag_saved_openings = 0;
 5002                 device->refcount = 1;
 5003                 if (bus->sim->flags & CAM_SIM_MPSAFE)
 5004                         callout_init_mtx(&device->callout, bus->sim->mtx, 0);
 5005                 else
 5006                         callout_init_mtx(&device->callout, &Giant, 0);
 5007 
 5008                 /*
 5009                  * Hold a reference to our parent target so it
 5010                  * will not go away before we do.
 5011                  */
 5012                 target->refcount++;
 5013 
 5014                 /*
 5015                  * XXX should be limited by number of CCBs this bus can
 5016                  * do.
 5017                  */
 5018                 bus->sim->max_ccbs += device->ccbq.devq_openings;
 5019                 /* Insertion sort into our target's device list */
 5020                 cur_device = TAILQ_FIRST(&target->ed_entries);
 5021                 while (cur_device != NULL && cur_device->lun_id < lun_id)
 5022                         cur_device = TAILQ_NEXT(cur_device, links);
 5023                 if (cur_device != NULL) {
 5024                         TAILQ_INSERT_BEFORE(cur_device, device, links);
 5025                 } else {
 5026                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
 5027                 }
 5028                 target->generation++;
 5029                 if (lun_id != CAM_LUN_WILDCARD) {
 5030                         xpt_compile_path(&path,
 5031                                          NULL,
 5032                                          bus->path_id,
 5033                                          target->target_id,
 5034                                          lun_id);
 5035                         xpt_devise_transport(&path);
 5036                         xpt_release_path(&path);
 5037                 }
 5038         }
 5039         return (device);
 5040 }
 5041 
 5042 static void
 5043 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
 5044                    struct cam_ed *device)
 5045 {
 5046 
 5047         if ((--device->refcount == 0)
 5048          && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
 5049                 struct cam_devq *devq;
 5050 
 5051                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
 5052                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
 5053                         panic("Removing device while still queued for ccbs");
 5054 
 5055                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
 5056                         callout_stop(&device->callout);
 5057 
 5058                 TAILQ_REMOVE(&target->ed_entries, device, links);
 5059                 target->generation++;
 5060                 bus->sim->max_ccbs -= device->ccbq.devq_openings;
 5061                 /* Release our slot in the devq */
 5062                 devq = bus->sim->devq;
 5063                 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
 5064                 camq_fini(&device->drvq);
 5065                 camq_fini(&device->ccbq.queue);
 5066                 free(device, M_CAMXPT);
 5067                 xpt_release_target(bus, target);
 5068         }
 5069 }
 5070 
 5071 static u_int32_t
 5072 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
 5073 {
 5074         int     diff;
 5075         int     result;
 5076         struct  cam_ed *dev;
 5077 
 5078         dev = path->device;
 5079 
 5080         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
 5081         result = cam_ccbq_resize(&dev->ccbq, newopenings);
 5082         if (result == CAM_REQ_CMP && (diff < 0)) {
 5083                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
 5084         }
 5085         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 5086          || (dev->inq_flags & SID_CmdQue) != 0)
 5087                 dev->tag_saved_openings = newopenings;
 5088         /* Adjust the global limit */
 5089         dev->sim->max_ccbs += diff;
 5090         return (result);
 5091 }
 5092 
 5093 static struct cam_eb *
 5094 xpt_find_bus(path_id_t path_id)
 5095 {
 5096         struct cam_eb *bus;
 5097 
 5098         mtx_lock(&xsoftc.xpt_topo_lock);
 5099         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
 5100              bus != NULL;
 5101              bus = TAILQ_NEXT(bus, links)) {
 5102                 if (bus->path_id == path_id) {
 5103                         bus->refcount++;
 5104                         break;
 5105                 }
 5106         }
 5107         mtx_unlock(&xsoftc.xpt_topo_lock);
 5108         return (bus);
 5109 }
 5110 
 5111 static struct cam_et *
 5112 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
 5113 {
 5114         struct cam_et *target;
 5115 
 5116         for (target = TAILQ_FIRST(&bus->et_entries);
 5117              target != NULL;
 5118              target = TAILQ_NEXT(target, links)) {
 5119                 if (target->target_id == target_id) {
 5120                         target->refcount++;
 5121                         break;
 5122                 }
 5123         }
 5124         return (target);
 5125 }
 5126 
 5127 static struct cam_ed *
 5128 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
 5129 {
 5130         struct cam_ed *device;
 5131 
 5132         for (device = TAILQ_FIRST(&target->ed_entries);
 5133              device != NULL;
 5134              device = TAILQ_NEXT(device, links)) {
 5135                 if (device->lun_id == lun_id) {
 5136                         device->refcount++;
 5137                         break;
 5138                 }
 5139         }
 5140         return (device);
 5141 }
 5142 
 5143 typedef struct {
 5144         union   ccb *request_ccb;
 5145         struct  ccb_pathinq *cpi;
 5146         int     counter;
 5147 } xpt_scan_bus_info;
 5148 
 5149 /*
 5150  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
 5151  * As the scan progresses, xpt_scan_bus is used as the
 5152  * completion callback function.
 5153  */
 5154 static void
 5155 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
 5156 {
 5157         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5158                   ("xpt_scan_bus\n"));
 5159         switch (request_ccb->ccb_h.func_code) {
 5160         case XPT_SCAN_BUS:
 5161         {
 5162                 xpt_scan_bus_info *scan_info;
 5163                 union   ccb *work_ccb;
 5164                 struct  cam_path *path;
 5165                 u_int   i;
 5166                 u_int   max_target;
 5167                 u_int   initiator_id;
 5168 
 5169                 /* Find out the characteristics of the bus */
 5170                 work_ccb = xpt_alloc_ccb_nowait();
 5171                 if (work_ccb == NULL) {
 5172                         request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 5173                         xpt_done(request_ccb);
 5174                         return;
 5175                 }
 5176                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
 5177                               request_ccb->ccb_h.pinfo.priority);
 5178                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 5179                 xpt_action(work_ccb);
 5180                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
 5181                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
 5182                         xpt_free_ccb(work_ccb);
 5183                         xpt_done(request_ccb);
 5184                         return;
 5185                 }
 5186 
 5187                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5188                         /*
 5189                          * Can't scan the bus on an adapter that
 5190                          * cannot perform the initiator role.
 5191                          */
 5192                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5193                         xpt_free_ccb(work_ccb);
 5194                         xpt_done(request_ccb);
 5195                         return;
 5196                 }
 5197 
 5198                 /* Save some state for use while we probe for devices */
 5199                 scan_info = (xpt_scan_bus_info *)
 5200                     malloc(sizeof(xpt_scan_bus_info), M_CAMXPT, M_NOWAIT);
 5201                 if (scan_info == NULL) {
 5202                         request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 5203                         xpt_done(request_ccb);
 5204                         return;
 5205                 }
 5206                 scan_info->request_ccb = request_ccb;
 5207                 scan_info->cpi = &work_ccb->cpi;
 5208 
 5209                 /* Cache on our stack so we can work asynchronously */
 5210                 max_target = scan_info->cpi->max_target;
 5211                 initiator_id = scan_info->cpi->initiator_id;
 5212 
 5213 
 5214                 /*
 5215                  * We can scan all targets in parallel, or do it sequentially.
 5216                  */
 5217                 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
 5218                         max_target = 0;
 5219                         scan_info->counter = 0;
 5220                 } else {
 5221                         scan_info->counter = scan_info->cpi->max_target + 1;
 5222                         if (scan_info->cpi->initiator_id < scan_info->counter) {
 5223                                 scan_info->counter--;
 5224                         }
 5225                 }
 5226 
 5227                 for (i = 0; i <= max_target; i++) {
 5228                         cam_status status;
 5229                         if (i == initiator_id)
 5230                                 continue;
 5231 
 5232                         status = xpt_create_path(&path, xpt_periph,
 5233                                                  request_ccb->ccb_h.path_id,
 5234                                                  i, 0);
 5235                         if (status != CAM_REQ_CMP) {
 5236                                 printf("xpt_scan_bus: xpt_create_path failed"
 5237                                        " with status %#x, bus scan halted\n",
 5238                                        status);
 5239                                 free(scan_info, M_CAMXPT);
 5240                                 request_ccb->ccb_h.status = status;
 5241                                 xpt_free_ccb(work_ccb);
 5242                                 xpt_done(request_ccb);
 5243                                 break;
 5244                         }
 5245                         work_ccb = xpt_alloc_ccb_nowait();
 5246                         if (work_ccb == NULL) {
 5247                                 xpt_free_ccb((union ccb *)scan_info->cpi);
 5248                                 free(scan_info, M_CAMXPT);
 5249                                 xpt_free_path(path);
 5250                                 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 5251                                 xpt_done(request_ccb);
 5252                                 break;
 5253                         }
 5254                         xpt_setup_ccb(&work_ccb->ccb_h, path,
 5255                                       request_ccb->ccb_h.pinfo.priority);
 5256                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5257                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5258                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5259                         work_ccb->crcn.flags = request_ccb->crcn.flags;
 5260                         xpt_action(work_ccb);
 5261                 }
 5262                 break;
 5263         }
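              /*
               * Each XPT_SCAN_LUN CCB issued above names xpt_scan_bus as
               * its completion callback, so the case below runs once for
               * every lun scan that finishes.
               */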
 5264         case XPT_SCAN_LUN:
 5265         {
 5266                 cam_status status;
 5267                 struct cam_path *path;
 5268                 xpt_scan_bus_info *scan_info;
 5269                 path_id_t path_id;
 5270                 target_id_t target_id;
 5271                 lun_id_t lun_id;
 5272 
 5273                 /* Reuse the same CCB to query if a device was really found */
 5274                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
 5275                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
 5276                               request_ccb->ccb_h.pinfo.priority);
 5277                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 5278 
 5279                 path_id = request_ccb->ccb_h.path_id;
 5280                 target_id = request_ccb->ccb_h.target_id;
 5281                 lun_id = request_ccb->ccb_h.target_lun;
 5282                 xpt_action(request_ccb);
 5283 
 5284                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
 5285                         struct cam_ed *device;
 5286                         struct cam_et *target;
 5287                         int phl;
 5288 
 5289                         /*
 5290                          * If we already probed lun 0 successfully, or
 5291                          * we have additional configured luns on this
 5292                          * target that might have "gone away", go on to
 5293                          * the next lun.
 5294                          */
 5295                         target = request_ccb->ccb_h.path->target;
 5296                         /*
 5297                          * We may touch devices that we don't
 5298                          * hold references to, so ensure they
 5299                          * don't disappear out from under us.
 5300                          * The target above is referenced by the
 5301                          * path in the request ccb.
 5302                          */
 5303                         phl = 0;
 5304                         device = TAILQ_FIRST(&target->ed_entries);
 5305                         if (device != NULL) {
 5306                                 phl = CAN_SRCH_HI_SPARSE(device);
 5307                                 if (device->lun_id == 0)
 5308                                         device = TAILQ_NEXT(device, links);
 5309                         }
 5310                         if ((lun_id != 0) || (device != NULL)) {
 5311                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
 5312                                         lun_id++;
 5313                         }
 5314                 } else {
 5315                         struct cam_ed *device;
 5316 
 5317                         device = request_ccb->ccb_h.path->device;
 5318 
 5319                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
 5320                                 /* Try the next lun */
 5321                                 if (lun_id < (CAM_SCSI2_MAXLUN-1)
 5322                                   || CAN_SRCH_HI_DENSE(device))
 5323                                         lun_id++;
 5324                         }
 5325                 }
 5326 
 5327                 /*
 5328                  * Free the current request path; we're done with it.
 5329                  */
 5330                 xpt_free_path(request_ccb->ccb_h.path);
 5331 
 5332                 /*
 5333                  * Check to see if we should scan any further luns.
 5334                  */
 5335                 if (lun_id == request_ccb->ccb_h.target_lun
 5336                  || lun_id > scan_info->cpi->max_lun) {
 5337                         int done;
 5338 
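                              /*
                               * No more luns to scan on this target.
                               * Sequential scans step on to the next
                               * non-initiator target; parallel scans just
                               * retire one of the outstanding target scans.
                               */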
 5339  hop_again:
 5340                         done = 0;
 5341                         if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
 5342                                 scan_info->counter++;
 5343                                 if (scan_info->counter ==
 5344                                     scan_info->cpi->initiator_id) {
 5345                                         scan_info->counter++;
 5346                                 }
 5347                                 if (scan_info->counter >=
 5348                                     scan_info->cpi->max_target+1) {
 5349                                         done = 1;
 5350                                 }
 5351                         } else {
 5352                                 scan_info->counter--;
 5353                                 if (scan_info->counter == 0) {
 5354                                         done = 1;
 5355                                 }
 5356                         }
 5357                         if (done) {
 5358                                 xpt_free_ccb(request_ccb);
 5359                                 xpt_free_ccb((union ccb *)scan_info->cpi);
 5360                                 request_ccb = scan_info->request_ccb;
 5361                                 free(scan_info, M_CAMXPT);
 5362                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
 5363                                 xpt_done(request_ccb);
 5364                                 break;
 5365                         }
 5366 
 5367                         if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
 5368                                 xpt_free_ccb(request_ccb);
 5369                                 break;
 5370                         }
 5371                         status = xpt_create_path(&path, xpt_periph,
 5372                             scan_info->request_ccb->ccb_h.path_id,
 5373                             scan_info->counter, 0);
 5374                         if (status != CAM_REQ_CMP) {
 5375                                 printf("xpt_scan_bus: xpt_create_path failed"
 5376                                     " with status %#x, bus scan halted\n",
 5377                                     status);
 5378                                 xpt_free_ccb(request_ccb);
 5379                                 xpt_free_ccb((union ccb *)scan_info->cpi);
 5380                                 request_ccb = scan_info->request_ccb;
 5381                                 free(scan_info, M_CAMXPT);
 5382                                 request_ccb->ccb_h.status = status;
 5383                                 xpt_done(request_ccb);
 5384                                 break;
 5385                         }
 5386                         xpt_setup_ccb(&request_ccb->ccb_h, path,
 5387                             request_ccb->ccb_h.pinfo.priority);
 5388                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5389                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5390                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5391                         request_ccb->crcn.flags =
 5392                             scan_info->request_ccb->crcn.flags;
 5393                 } else {
 5394                         status = xpt_create_path(&path, xpt_periph,
 5395                                                  path_id, target_id, lun_id);
 5396                         if (status != CAM_REQ_CMP) {
 5397                                 printf("xpt_scan_bus: xpt_create_path failed "
 5398                                        "with status %#x, halting LUN scan\n",
 5399                                        status);
 5400                                 goto hop_again;
 5401                         }
 5402                         xpt_setup_ccb(&request_ccb->ccb_h, path,
 5403                                       request_ccb->ccb_h.pinfo.priority);
 5404                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5405                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
 5406                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
 5407                         request_ccb->crcn.flags =
 5408                                 scan_info->request_ccb->crcn.flags;
 5409                 }
 5410                 xpt_action(request_ccb);
 5411                 break;
 5412         }
 5413         default:
 5414                 break;
 5415         }
 5416 }
 5417 
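      /*
       * Probe state machine.  A successful probe typically advances
       * PROBE_TUR -> PROBE_INQUIRY [-> PROBE_FULL_INQUIRY] ->
       * PROBE_MODE_SENSE (tagged-queueing devices only) ->
       * PROBE_SERIAL_NUM_0 -> PROBE_SERIAL_NUM_1 ->
       * PROBE_TUR_FOR_NEGOTIATION.  The PROBE_INQUIRY_BASIC_DV* and
       * PROBE_DV_EXIT states are only used while performing Basic Domain
       * Validation.  probedone() drives the transitions.
       */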
 5418 typedef enum {
 5419         PROBE_TUR,
 5420         PROBE_INQUIRY,  /* this counts as DV0 for Basic Domain Validation */
 5421         PROBE_FULL_INQUIRY,
 5422         PROBE_MODE_SENSE,
 5423         PROBE_SERIAL_NUM_0,
 5424         PROBE_SERIAL_NUM_1,
 5425         PROBE_TUR_FOR_NEGOTIATION,
 5426         PROBE_INQUIRY_BASIC_DV1,
 5427         PROBE_INQUIRY_BASIC_DV2,
 5428         PROBE_DV_EXIT,
 5429         PROBE_INVALID
 5430 } probe_action;
 5431 
 5432 static char *probe_action_text[] = {
 5433         "PROBE_TUR",
 5434         "PROBE_INQUIRY",
 5435         "PROBE_FULL_INQUIRY",
 5436         "PROBE_MODE_SENSE",
 5437         "PROBE_SERIAL_NUM_0",
 5438         "PROBE_SERIAL_NUM_1",
 5439         "PROBE_TUR_FOR_NEGOTIATION",
 5440         "PROBE_INQUIRY_BASIC_DV1",
 5441         "PROBE_INQUIRY_BASIC_DV2",
 5442         "PROBE_DV_EXIT",
 5443         "PROBE_INVALID"
 5444 };
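      /*
       * probe_action_text must be kept in step with the probe_action enum
       * above; PROBE_SET_ACTION() indexes it directly by action value.
       */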
 5445 
 5446 #define PROBE_SET_ACTION(softc, newaction)      \
 5447 do {                                                                    \
 5448         char **text;                                                    \
 5449         text = probe_action_text;                                       \
 5450         CAM_DEBUG((softc)->periph->path, CAM_DEBUG_INFO,                \
 5451             ("Probe %s to %s\n", text[(softc)->action],                 \
 5452             text[(newaction)]));                                        \
 5453         (softc)->action = (newaction);                                  \
 5454 } while(0)
 5455 
 5456 typedef enum {
 5457         PROBE_INQUIRY_CKSUM     = 0x01,
 5458         PROBE_SERIAL_CKSUM      = 0x02,
 5459         PROBE_NO_ANNOUNCE       = 0x04
 5460 } probe_flags;
 5461 
 5462 typedef struct {
 5463         TAILQ_HEAD(, ccb_hdr) request_ccbs;
 5464         probe_action    action;
 5465         union ccb       saved_ccb;
 5466         probe_flags     flags;
 5467         MD5_CTX         context;
 5468         u_int8_t        digest[16];
 5469         struct cam_periph *periph;
 5470 } probe_softc;
 5471 
 5472 static void
 5473 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
 5474              cam_flags flags, union ccb *request_ccb)
 5475 {
 5476         struct ccb_pathinq cpi;
 5477         cam_status status;
 5478         struct cam_path *new_path;
 5479         struct cam_periph *old_periph;
 5480 
 5481         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 5482                   ("xpt_scan_lun\n"));
 5483 
 5484         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 5485         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5486         xpt_action((union ccb *)&cpi);
 5487 
 5488         if (cpi.ccb_h.status != CAM_REQ_CMP) {
 5489                 if (request_ccb != NULL) {
 5490                         request_ccb->ccb_h.status = cpi.ccb_h.status;
 5491                         xpt_done(request_ccb);
 5492                 }
 5493                 return;
 5494         }
 5495 
 5496         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
 5497                 /*
 5498                  * Can't scan the lun on an adapter that
 5499                  * cannot perform the initiator role.
 5500                  */
 5501                 if (request_ccb != NULL) {
 5502                         request_ccb->ccb_h.status = CAM_REQ_CMP;
 5503                         xpt_done(request_ccb);
 5504                 }
 5505                 return;
 5506         }
 5507 
 5508         if (request_ccb == NULL) {
 5509                 request_ccb = malloc(sizeof(union ccb), M_CAMXPT, M_NOWAIT);
 5510                 if (request_ccb == NULL) {
 5511                         xpt_print(path, "xpt_scan_lun: can't allocate CCB, "
 5512                             "can't continue\n");
 5513                         return;
 5514                 }
 5515                 new_path = malloc(sizeof(*new_path), M_CAMXPT, M_NOWAIT);
 5516                 if (new_path == NULL) {
 5517                         xpt_print(path, "xpt_scan_lun: can't allocate path, "
 5518                             "can't continue\n");
 5519                         free(request_ccb, M_CAMXPT);
 5520                         return;
 5521                 }
 5522                 status = xpt_compile_path(new_path, xpt_periph,
 5523                                           path->bus->path_id,
 5524                                           path->target->target_id,
 5525                                           path->device->lun_id);
 5526 
 5527                 if (status != CAM_REQ_CMP) {
 5528                         xpt_print(path, "xpt_scan_lun: can't compile path, "
 5529                             "can't continue\n");
 5530                         free(request_ccb, M_CAMXPT);
 5531                         free(new_path, M_CAMXPT);
 5532                         return;
 5533                 }
 5534                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
 5535                 request_ccb->ccb_h.cbfcnp = xptscandone;
 5536                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
 5537                 request_ccb->crcn.flags = flags;
 5538         }
 5539 
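              /*
               * If a probe peripheral already exists for this device, queue
               * the new request on it; otherwise allocate a fresh probe
               * periph, which starts the probe state machine for this lun.
               */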
 5540         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
 5541                 probe_softc *softc;
 5542 
 5543                 softc = (probe_softc *)old_periph->softc;
 5544                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5545                                   periph_links.tqe);
 5546         } else {
 5547                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
 5548                                           probestart, "probe",
 5549                                           CAM_PERIPH_BIO,
 5550                                           request_ccb->ccb_h.path, NULL, 0,
 5551                                           request_ccb);
 5552 
 5553                 if (status != CAM_REQ_CMP) {
 5554                         xpt_print(path, "xpt_scan_lun: cam_periph_alloc "
 5555                             "returned an error, can't continue probe\n");
 5556                         request_ccb->ccb_h.status = status;
 5557                         xpt_done(request_ccb);
 5558                 }
 5559         }
 5560 }
 5561 
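      /*
       * Completion callback for the scan CCB allocated in xpt_scan_lun():
       * release the path reference taken there and free the CCB and path
       * storage.
       */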
 5562 static void
 5563 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
 5564 {
 5565         xpt_release_path(done_ccb->ccb_h.path);
 5566         free(done_ccb->ccb_h.path, M_CAMXPT);
 5567         free(done_ccb, M_CAMXPT);
 5568 }
 5569 
 5570 static cam_status
 5571 proberegister(struct cam_periph *periph, void *arg)
 5572 {
 5573         union ccb *request_ccb; /* CCB representing the probe request */
 5574         cam_status status;
 5575         probe_softc *softc;
 5576 
 5577         request_ccb = (union ccb *)arg;
 5578         if (periph == NULL) {
 5579                 printf("proberegister: periph was NULL!!\n");
 5580                 return(CAM_REQ_CMP_ERR);
 5581         }
 5582 
 5583         if (request_ccb == NULL) {
 5584                 printf("proberegister: no probe CCB, "
 5585                        "can't register device\n");
 5586                 return(CAM_REQ_CMP_ERR);
 5587         }
 5588 
 5589         softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT);
 5590 
 5591         if (softc == NULL) {
 5592                 printf("proberegister: Unable to probe new device. "
 5593                        "Unable to allocate softc\n");
 5594                 return(CAM_REQ_CMP_ERR);
 5595         }
 5596         TAILQ_INIT(&softc->request_ccbs);
 5597         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
 5598                           periph_links.tqe);
 5599         softc->flags = 0;
 5600         periph->softc = softc;
 5601         softc->periph = periph;
 5602         softc->action = PROBE_INVALID;
 5603         status = cam_periph_acquire(periph);
 5604         if (status != CAM_REQ_CMP) {
 5605                 return (status);
 5606         }
 5607 
 5608 
 5609         /*
 5610          * Ensure we've waited at least a bus settle
 5611          * delay before attempting to probe the device.
 5612          * For HBAs that don't do bus resets, this won't make a difference.
 5613          */
 5614         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
 5615                                       scsi_delay);
 5616         probeschedule(periph);
 5617         return(CAM_REQ_CMP);
 5618 }
 5619 
 5620 static void
 5621 probeschedule(struct cam_periph *periph)
 5622 {
 5623         struct ccb_pathinq cpi;
 5624         union ccb *ccb;
 5625         probe_softc *softc;
 5626 
 5627         softc = (probe_softc *)periph->softc;
 5628         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 5629 
 5630         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
 5631         cpi.ccb_h.func_code = XPT_PATH_INQ;
 5632         xpt_action((union ccb *)&cpi);
 5633 
 5634         /*
 5635          * If a device has gone away and another device, or the same one,
 5636          * is back in the same place, it should have a unit attention
 5637          * condition pending.  It will not report the unit attention in
 5638          * response to an inquiry, which may leave invalid transfer
 5639          * negotiations in effect.  The TUR will reveal the unit attention
 5640          * condition.  Only send the TUR for lun 0, since some devices
 5641          * will get confused by commands other than inquiry to non-existent
 5642          * luns.  If you think a device has gone away, start your scan from
 5643          * lun 0.  This will ensure that any bogus transfer settings are
 5644          * invalidated.
 5645          *
 5646          * If we haven't seen the device before and the controller supports
 5647          * some kind of transfer negotiation, negotiate with the first
 5648          * sent command if no bus reset was performed at startup.  This
 5649          * ensures that the device is not confused by transfer negotiation
 5650          * settings left over by loader or BIOS action.
 5651          */
 5652         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 5653          && (ccb->ccb_h.target_lun == 0)) {
 5654                 PROBE_SET_ACTION(softc, PROBE_TUR);
 5655         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
 5656               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
 5657                 proberequestdefaultnegotiation(periph);
 5658                 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
 5659         } else {
 5660                 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
 5661         }
 5662 
 5663         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
 5664                 softc->flags |= PROBE_NO_ANNOUNCE;
 5665         else
 5666                 softc->flags &= ~PROBE_NO_ANNOUNCE;
 5667 
 5668         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
 5669 }
 5670 
 5671 static void
 5672 probestart(struct cam_periph *periph, union ccb *start_ccb)
 5673 {
 5674         /* Probe the device that our peripheral driver points to */
 5675         struct ccb_scsiio *csio;
 5676         probe_softc *softc;
 5677 
 5678         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
 5679 
 5680         softc = (probe_softc *)periph->softc;
 5681         csio = &start_ccb->csio;
 5682 
 5683         switch (softc->action) {
 5684         case PROBE_TUR:
 5685         case PROBE_TUR_FOR_NEGOTIATION:
 5686         case PROBE_DV_EXIT:
 5687         {
 5688                 scsi_test_unit_ready(csio,
 5689                                      /*retries*/10,
 5690                                      probedone,
 5691                                      MSG_SIMPLE_Q_TAG,
 5692                                      SSD_FULL_SIZE,
 5693                                      /*timeout*/60000);
 5694                 break;
 5695         }
 5696         case PROBE_INQUIRY:
 5697         case PROBE_FULL_INQUIRY:
 5698         case PROBE_INQUIRY_BASIC_DV1:
 5699         case PROBE_INQUIRY_BASIC_DV2:
 5700         {
 5701                 u_int inquiry_len;
 5702                 struct scsi_inquiry_data *inq_buf;
 5703 
 5704                 inq_buf = &periph->path->device->inq_data;
 5705 
 5706                 /*
 5707                  * If the device is currently configured, we calculate an
 5708                  * MD5 checksum of the inquiry data, and if the serial number
 5709                  * length is greater than 0, add the serial number data
 5710                  * into the checksum as well.  Once the inquiry and the
 5711                  * serial number check finish, we attempt to figure out
 5712                  * whether we still have the same device.
 5713                  */
 5714                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
 5715 
 5716                         MD5Init(&softc->context);
 5717                         MD5Update(&softc->context, (unsigned char *)inq_buf,
 5718                                   sizeof(struct scsi_inquiry_data));
 5719                         softc->flags |= PROBE_INQUIRY_CKSUM;
 5720                         if (periph->path->device->serial_num_len > 0) {
 5721                                 MD5Update(&softc->context,
 5722                                           periph->path->device->serial_num,
 5723                                           periph->path->device->serial_num_len);
 5724                                 softc->flags |= PROBE_SERIAL_CKSUM;
 5725                         }
 5726                         MD5Final(softc->digest, &softc->context);
 5727                 }
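                      /*
                       * The digest computed above is recomputed and
                       * compared in probedone()'s PROBE_SERIAL_NUM_1 case
                       * to decide whether this is still the same device.
                       */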
 5728 
 5729                 if (softc->action == PROBE_INQUIRY)
 5730                         inquiry_len = SHORT_INQUIRY_LENGTH;
 5731                 else
 5732                         inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
 5733 
 5734                 /*
 5735                  * Some parallel SCSI devices fail to send an
 5736                  * ignore wide residue message when dealing with
 5737                  * odd length inquiry requests.  Round up to be
 5738                  * safe.
 5739                  */
 5740                 inquiry_len = roundup2(inquiry_len, 2);
 5741 
 5742                 if (softc->action == PROBE_INQUIRY_BASIC_DV1
 5743                  || softc->action == PROBE_INQUIRY_BASIC_DV2) {
 5744                         inq_buf = malloc(inquiry_len, M_CAMXPT, M_NOWAIT);
 5745                 }
 5746                 if (inq_buf == NULL) {
 5747                         xpt_print(periph->path, "malloc failure - skipping Basic "
 5748                             "Domain Validation\n");
 5749                         PROBE_SET_ACTION(softc, PROBE_DV_EXIT);
 5750                         scsi_test_unit_ready(csio,
 5751                                              /*retries*/4,
 5752                                              probedone,
 5753                                              MSG_SIMPLE_Q_TAG,
 5754                                              SSD_FULL_SIZE,
 5755                                              /*timeout*/60000);
 5756                         break;
 5757                 }
 5758                 scsi_inquiry(csio,
 5759                              /*retries*/4,
 5760                              probedone,
 5761                              MSG_SIMPLE_Q_TAG,
 5762                              (u_int8_t *)inq_buf,
 5763                              inquiry_len,
 5764                              /*evpd*/FALSE,
 5765                              /*page_code*/0,
 5766                              SSD_MIN_SIZE,
 5767                              /*timeout*/60 * 1000);
 5768                 break;
 5769         }
 5770         case PROBE_MODE_SENSE:
 5771         {
 5772                 void  *mode_buf;
 5773                 int    mode_buf_len;
 5774 
 5775                 mode_buf_len = sizeof(struct scsi_mode_header_6)
 5776                              + sizeof(struct scsi_mode_blk_desc)
 5777                              + sizeof(struct scsi_control_page);
 5778                 mode_buf = malloc(mode_buf_len, M_CAMXPT, M_NOWAIT);
 5779                 if (mode_buf != NULL) {
 5780                         scsi_mode_sense(csio,
 5781                                         /*retries*/4,
 5782                                         probedone,
 5783                                         MSG_SIMPLE_Q_TAG,
 5784                                         /*dbd*/FALSE,
 5785                                         SMS_PAGE_CTRL_CURRENT,
 5786                                         SMS_CONTROL_MODE_PAGE,
 5787                                         mode_buf,
 5788                                         mode_buf_len,
 5789                                         SSD_FULL_SIZE,
 5790                                         /*timeout*/60000);
 5791                         break;
 5792                 }
 5793                 xpt_print(periph->path, "Unable to mode sense control page - "
 5794                     "malloc failure\n");
 5795                 PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0);
 5796         }
 5797         /* FALLTHROUGH */
 5798         case PROBE_SERIAL_NUM_0:
 5799         {
 5800                 struct scsi_vpd_supported_page_list *vpd_list = NULL;
 5801                 struct cam_ed *device;
 5802 
 5803                 device = periph->path->device;
 5804                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
 5805                         vpd_list = malloc(sizeof(*vpd_list), M_CAMXPT,
 5806                             M_NOWAIT | M_ZERO);
 5807                 }
 5808 
 5809                 if (vpd_list != NULL) {
 5810                         scsi_inquiry(csio,
 5811                                      /*retries*/4,
 5812                                      probedone,
 5813                                      MSG_SIMPLE_Q_TAG,
 5814                                      (u_int8_t *)vpd_list,
 5815                                      sizeof(*vpd_list),
 5816                                      /*evpd*/TRUE,
 5817                                      SVPD_SUPPORTED_PAGE_LIST,
 5818                                      SSD_MIN_SIZE,
 5819                                      /*timeout*/60 * 1000);
 5820                         break;
 5821                 }
 5822                 /*
 5823                  * We'll have to do without; let our probedone
 5824                  * routine finish up for us.
 5825                  */
 5826                 start_ccb->csio.data_ptr = NULL;
 5827                 probedone(periph, start_ccb);
 5828                 return;
 5829         }
 5830         case PROBE_SERIAL_NUM_1:
 5831         {
 5832                 struct scsi_vpd_unit_serial_number *serial_buf;
 5833                 struct cam_ed* device;
 5834 
 5835                 serial_buf = NULL;
 5836                 device = periph->path->device;
 5837                 if (device->serial_num != NULL) {
 5838                         free(device->serial_num, M_CAMXPT);
 5839                         device->serial_num = NULL;
 5840                         device->serial_num_len = 0;
 5841                 }
 5842 
 5843                 serial_buf = (struct scsi_vpd_unit_serial_number *)
 5844                         malloc(sizeof(*serial_buf), M_CAMXPT, M_NOWAIT|M_ZERO);
 5845 
 5846                 if (serial_buf != NULL) {
 5847                         scsi_inquiry(csio,
 5848                                      /*retries*/4,
 5849                                      probedone,
 5850                                      MSG_SIMPLE_Q_TAG,
 5851                                      (u_int8_t *)serial_buf,
 5852                                      sizeof(*serial_buf),
 5853                                      /*evpd*/TRUE,
 5854                                      SVPD_UNIT_SERIAL_NUMBER,
 5855                                      SSD_MIN_SIZE,
 5856                                      /*timeout*/60 * 1000);
 5857                         break;
 5858                 }
 5859                 /*
 5860                  * We'll have to do without; let our probedone
 5861                  * routine finish up for us.
 5862                  */
 5863                 start_ccb->csio.data_ptr = NULL;
 5864                 probedone(periph, start_ccb);
 5865                 return;
 5866         }
 5867         case PROBE_INVALID:
 5868                 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO,
 5869                     ("probestart: invalid action state\n"));
 5870         default:
 5871                 break;
 5872         }
 5873         xpt_action(start_ccb);
 5874 }
 5875 
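      /*
       * Fetch the user (default) transfer settings for a device and
       * install them as the current settings.  Since controllers do not
       * negotiate until the first XPT_SET_TRAN_SETTINGS CCB arrives, this
       * is what prompts the SIM to (re)negotiate with the device.
       */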
 5876 static void
 5877 proberequestdefaultnegotiation(struct cam_periph *periph)
 5878 {
 5879         struct ccb_trans_settings cts;
 5880 
 5881         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
 5882         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 5883         cts.type = CTS_TYPE_USER_SETTINGS;
 5884         xpt_action((union ccb *)&cts);
 5885         if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 5886                 return;
 5887         }
 5888         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 5889         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 5890         xpt_action((union ccb *)&cts);
 5891 }
 5892 
 5893 /*
 5894  * Backoff Negotiation Code: only pertinent for SPI devices.  Returns
       * nonzero when a slower sync rate (or async) has been successfully
       * requested, zero when Domain Validation should be abandoned.
 5895  */
 5896 static int
 5897 proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
 5898 {
 5899         struct ccb_trans_settings cts;
 5900         struct ccb_trans_settings_spi *spi;
 5901 
 5902         memset(&cts, 0, sizeof (cts));
 5903         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
 5904         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 5905         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 5906         xpt_action((union ccb *)&cts);
 5907         if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 5908                 if (bootverbose) {
 5909                         xpt_print(periph->path,
 5910                             "failed to get current device settings\n");
 5911                 }
 5912                 return (0);
 5913         }
 5914         if (cts.transport != XPORT_SPI) {
 5915                 if (bootverbose) {
 5916                         xpt_print(periph->path, "not SPI transport\n");
 5917                 }
 5918                 return (0);
 5919         }
 5920         spi = &cts.xport_specific.spi;
 5921 
 5922         /*
 5923          * We cannot renegotiate sync rate if we don't have one.
 5924          */
 5925         if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
 5926                 if (bootverbose) {
 5927                         xpt_print(periph->path, "no sync rate known\n");
 5928                 }
 5929                 return (0);
 5930         }
 5931 
 5932         /*
 5933          * We'll assert that we don't have to touch PPR options; the
 5934          * SIM will see what we do with period and offset and adjust
 5935          * the PPR options as appropriate.
 5936          */
 5937 
 5938         /*
 5939          * A sync rate with unknown or zero offset is nonsensical.
 5940          * A sync period of zero means Async.
 5941          */
 5942         if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
 5943          || spi->sync_offset == 0 || spi->sync_period == 0) {
 5944                 if (bootverbose) {
 5945                         xpt_print(periph->path, "no sync rate available\n");
 5946                 }
 5947                 return (0);
 5948         }
 5949 
 5950         if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
 5951                 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 5952                     ("hit async: giving up on DV\n"));
 5953                 return (0);
 5954         }
 5955 
 5956 
 5957         /*
 5958          * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
 5959          * We don't try to remember 'last' settings to see if the SIM actually
 5960          * gets into the speed we want to set. We check on the SIM telling
 5961          * us that a requested speed is bad, but otherwise don't try to
 5962          * check the speed due to the asynchronous and handshake nature
 5963          * of speed setting.
 5964          */
 5965         spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
 5966         for (;;) {
 5967                 spi->sync_period++;
 5968                 if (spi->sync_period >= 0xf) {
 5969                         spi->sync_period = 0;
 5970                         spi->sync_offset = 0;
 5971                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 5972                             ("setting to async for DV\n"));
 5973                         /*
 5974                          * Once we hit async, we don't want to try
 5975                          * any more settings.
 5976                          */
 5977                         device->flags |= CAM_DEV_DV_HIT_BOTTOM;
 5978                 } else if (bootverbose) {
 5979                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 5980                             ("DV: period 0x%x\n", spi->sync_period));
 5981                         printf("setting period to 0x%x\n", spi->sync_period);
 5982                 }
 5983                 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 5984                 cts.type = CTS_TYPE_CURRENT_SETTINGS;
 5985                 xpt_action((union ccb *)&cts);
 5986                 if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 5987                         break;
 5988                 }
 5989                 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 5990                     ("DV: failed to set period 0x%x\n", spi->sync_period));
 5991                 if (spi->sync_period == 0) {
 5992                         return (0);
 5993                 }
 5994         }
 5995         return (1);
 5996 }
 5997 
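      /*
       * Completion handler for probe CCBs: digest the result of the step
       * that just finished, advance the probe state machine, and
       * reschedule the periph, or complete the original request and tear
       * the periph down once no requests remain queued.
       */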
 5998 static void
 5999 probedone(struct cam_periph *periph, union ccb *done_ccb)
 6000 {
 6001         probe_softc *softc;
 6002         struct cam_path *path;
 6003         u_int32_t  priority;
 6004 
 6005         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
 6006 
 6007         softc = (probe_softc *)periph->softc;
 6008         path = done_ccb->ccb_h.path;
 6009         priority = done_ccb->ccb_h.pinfo.priority;
 6010 
 6011         switch (softc->action) {
 6012         case PROBE_TUR:
 6013         {
 6014                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 6015 
 6016                         if (cam_periph_error(done_ccb, 0,
 6017                                              SF_NO_PRINT, NULL) == ERESTART)
 6018                                 return;
 6019                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 6020                                 /* Don't wedge the queue */
 6021                                 xpt_release_devq(done_ccb->ccb_h.path,
 6022                                                  /*count*/1,
 6023                                                  /*run_queue*/TRUE);
 6024                 }
 6025                 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
 6026                 xpt_release_ccb(done_ccb);
 6027                 xpt_schedule(periph, priority);
 6028                 return;
 6029         }
 6030         case PROBE_INQUIRY:
 6031         case PROBE_FULL_INQUIRY:
 6032         {
 6033                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 6034                         struct scsi_inquiry_data *inq_buf;
 6035                         u_int8_t periph_qual;
 6036 
 6037                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
 6038                         inq_buf = &path->device->inq_data;
 6039 
 6040                         periph_qual = SID_QUAL(inq_buf);
 6041 
 6042                         switch(periph_qual) {
 6043                         case SID_QUAL_LU_CONNECTED:
 6044                         {
 6045                                 u_int8_t len;
 6046 
 6047                                 /*
 6048                                  * We conservatively request only
 6049                                  * SHORT_INQUIRY_LENGTH bytes of inquiry
 6050                                  * information during our first try
 6051                                  * at sending an INQUIRY. If the device
 6052                                  * has more information to give,
 6053                                  * perform a second request specifying
 6054                                  * the amount of information the device
 6055                                  * is willing to give.
 6056                                  */
 6057                                 len = inq_buf->additional_length
 6058                                     + offsetof(struct scsi_inquiry_data,
 6059                                                additional_length) + 1;
 6060                                 if (softc->action == PROBE_INQUIRY
 6061                                     && len > SHORT_INQUIRY_LENGTH) {
 6062                                         PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY);
 6063                                         xpt_release_ccb(done_ccb);
 6064                                         xpt_schedule(periph, priority);
 6065                                         return;
 6066                                 }
 6067 
 6068                                 xpt_find_quirk(path->device);
 6069 
 6070                                 xpt_devise_transport(path);
 6071                                 if (INQ_DATA_TQ_ENABLED(inq_buf))
 6072                                         PROBE_SET_ACTION(softc, PROBE_MODE_SENSE);
 6073                                 else
 6074                                         PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0);
 6075 
 6076                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
 6077 
 6078                                 xpt_release_ccb(done_ccb);
 6079                                 xpt_schedule(periph, priority);
 6080                                 return;
 6081                         }
 6082                         default:
 6083                                 break;
 6084                         }
 6085                 } else if (cam_periph_error(done_ccb, 0,
 6086                                             done_ccb->ccb_h.target_lun > 0
 6087                                             ? SF_RETRY_UA|SF_QUIET_IR
 6088                                             : SF_RETRY_UA,
 6089                                             &softc->saved_ccb) == ERESTART) {
 6090                         return;
 6091                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6092                         /* Don't wedge the queue */
 6093                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6094                                          /*run_queue*/TRUE);
 6095                 }
 6096                 /*
 6097                  * If we get to this point, we got an error status back
 6098                  * from the inquiry and the error status doesn't require
 6099                  * automatically retrying the command.  Therefore, the
 6100                  * inquiry failed.  If we had inquiry information before
 6101                  * for this device, but this latest inquiry command failed,
 6102                  * the device has probably gone away.  If this device isn't
 6103                  * already marked unconfigured, notify the peripheral
 6104                  * drivers that this device is no more.
 6105                  */
 6106                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
 6107                         /* Send the async notification. */
 6108                         xpt_async(AC_LOST_DEVICE, path, NULL);
 6109 
 6110                 xpt_release_ccb(done_ccb);
 6111                 break;
 6112         }
 6113         case PROBE_MODE_SENSE:
 6114         {
 6115                 struct ccb_scsiio *csio;
 6116                 struct scsi_mode_header_6 *mode_hdr;
 6117 
 6118                 csio = &done_ccb->csio;
 6119                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
 6120                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 6121                         struct scsi_control_page *page;
 6122                         u_int8_t *offset;
 6123 
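                              /*
                               * The control mode page follows the 6-byte
                               * mode header and any block descriptors the
                               * device returned.
                               */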
 6124                         offset = ((u_int8_t *)&mode_hdr[1])
 6125                             + mode_hdr->blk_desc_len;
 6126                         page = (struct scsi_control_page *)offset;
 6127                         path->device->queue_flags = page->queue_flags;
 6128                 } else if (cam_periph_error(done_ccb, 0,
 6129                                             SF_RETRY_UA|SF_NO_PRINT,
 6130                                             &softc->saved_ccb) == ERESTART) {
 6131                         return;
 6132                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6133                         /* Don't wedge the queue */
 6134                         xpt_release_devq(done_ccb->ccb_h.path,
 6135                                          /*count*/1, /*run_queue*/TRUE);
 6136                 }
 6137                 xpt_release_ccb(done_ccb);
 6138                 free(mode_hdr, M_CAMXPT);
 6139                 PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0);
 6140                 xpt_schedule(periph, priority);
 6141                 return;
 6142         }
 6143         case PROBE_SERIAL_NUM_0:
 6144         {
 6145                 struct ccb_scsiio *csio;
 6146                 struct scsi_vpd_supported_page_list *page_list;
 6147                 int length, serialnum_supported, i;
 6148 
 6149                 serialnum_supported = 0;
 6150                 csio = &done_ccb->csio;
 6151                 page_list =
 6152                     (struct scsi_vpd_supported_page_list *)csio->data_ptr;
 6153 
 6154                 if (page_list == NULL) {
 6155                         /*
 6156                          * Don't process the command as it was never sent
 6157                          */
 6158                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
 6159                     && (page_list->length > 0)) {
 6160                         length = min(page_list->length,
 6161                             SVPD_SUPPORTED_PAGES_SIZE);
 6162                         for (i = 0; i < length; i++) {
 6163                                 if (page_list->list[i] ==
 6164                                     SVPD_UNIT_SERIAL_NUMBER) {
 6165                                         serialnum_supported = 1;
 6166                                         break;
 6167                                 }
 6168                         }
 6169                 } else if (cam_periph_error(done_ccb, 0,
 6170                                             SF_RETRY_UA|SF_NO_PRINT,
 6171                                             &softc->saved_ccb) == ERESTART) {
 6172                         return;
 6173                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6174                         /* Don't wedge the queue */
 6175                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6176                                          /*run_queue*/TRUE);
 6177                 }
 6178 
 6179                 if (page_list != NULL)
 6180                         free(page_list, M_CAMXPT);
 6181 
 6182                 if (serialnum_supported) {
 6183                         xpt_release_ccb(done_ccb);
 6184                         PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_1);
 6185                         xpt_schedule(periph, priority);
 6186                         return;
 6187                 }
 6188 
 6189                 csio->data_ptr = NULL;
 6190                 /* FALLTHROUGH */
 6191         }
 6192 
 6193         case PROBE_SERIAL_NUM_1:
 6194         {
 6195                 struct ccb_scsiio *csio;
 6196                 struct scsi_vpd_unit_serial_number *serial_buf;
 6197                 u_int32_t  priority;
 6198                 int changed;
 6199                 int have_serialnum;
 6200 
 6201                 changed = 1;
 6202                 have_serialnum = 0;
 6203                 csio = &done_ccb->csio;
 6204                 priority = done_ccb->ccb_h.pinfo.priority;
 6205                 serial_buf =
 6206                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
 6207 
 6208                 /* Clean up from previous instance of this device */
 6209                 if (path->device->serial_num != NULL) {
 6210                         free(path->device->serial_num, M_CAMXPT);
 6211                         path->device->serial_num = NULL;
 6212                         path->device->serial_num_len = 0;
 6213                 }
 6214 
 6215                 if (serial_buf == NULL) {
 6216                         /*
 6217                          * Don't process the command as it was never sent
 6218                          */
 6219                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
 6220                         && (serial_buf->length > 0)) {
 6221 
 6222                         have_serialnum = 1;
 6223                         path->device->serial_num =
 6224                                 (u_int8_t *)malloc((serial_buf->length + 1),
 6225                                                    M_CAMXPT, M_NOWAIT);
 6226                         if (path->device->serial_num != NULL) {
 6227                                 bcopy(serial_buf->serial_num,
 6228                                       path->device->serial_num,
 6229                                       serial_buf->length);
 6230                                 path->device->serial_num_len =
 6231                                     serial_buf->length;
 6232                                 path->device->serial_num[serial_buf->length]
 6233                                     = '\0';
 6234                         }
 6235                 } else if (cam_periph_error(done_ccb, 0,
 6236                                             SF_RETRY_UA|SF_NO_PRINT,
 6237                                             &softc->saved_ccb) == ERESTART) {
 6238                         return;
 6239                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6240                         /* Don't wedge the queue */
 6241                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6242                                          /*run_queue*/TRUE);
 6243                 }
 6244 
 6245                 /*
 6246                  * Let's see if we have seen this device before.
 6247                  */
 6248                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
 6249                         MD5_CTX context;
 6250                         u_int8_t digest[16];
 6251 
 6252                         MD5Init(&context);
 6253 
 6254                         MD5Update(&context,
 6255                                   (unsigned char *)&path->device->inq_data,
 6256                                   sizeof(struct scsi_inquiry_data));
 6257 
 6258                         if (have_serialnum)
 6259                                 MD5Update(&context, serial_buf->serial_num,
 6260                                           serial_buf->length);
 6261 
 6262                         MD5Final(digest, &context);
 6263                         if (bcmp(softc->digest, digest, 16) == 0)
 6264                                 changed = 0;
 6265 
 6266                         /*
 6267                          * XXX Do we need to do a TUR in order to ensure
 6268                          *     that the device really hasn't changed???
 6269                          */
 6270                         if ((changed != 0)
 6271                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
 6272                                 xpt_async(AC_LOST_DEVICE, path, NULL);
 6273                 }
 6274                 if (serial_buf != NULL)
 6275                         free(serial_buf, M_CAMXPT);
 6276 
 6277                 if (changed != 0) {
 6278                         /*
 6279                          * Now we have all the information necessary
 6280                          * to safely perform transfer negotiations.
 6281                          * Controllers don't perform
 6282                          * any negotiation or tagged queuing until
 6283                          * after the first XPT_SET_TRAN_SETTINGS ccb is
 6284                          * received.  So, on a new device, just retrieve
 6285                          * the user settings, and set them as the current
 6286                          * settings to set the device up.
 6287                          */
 6288                         proberequestdefaultnegotiation(periph);
 6289                         xpt_release_ccb(done_ccb);
 6290 
 6291                         /*
 6292                          * Perform a TUR to allow the controller to
 6293                          * perform any necessary transfer negotiation.
 6294                          */
 6295                         PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION);
 6296                         xpt_schedule(periph, priority);
 6297                         return;
 6298                 }
 6299                 xpt_release_ccb(done_ccb);
 6300                 break;
 6301         }
 6302         case PROBE_TUR_FOR_NEGOTIATION:
 6303                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
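                              /*
                               * DELAY() busy-waits in microseconds; give
                               * the device half a second to settle before
                               * deciding whether to retry.
                               */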
 6304                         DELAY(500000);
 6305                         if (cam_periph_error(done_ccb, 0, SF_RETRY_UA,
 6306                             NULL) == ERESTART)
 6307                                 return;
 6308                 }
 6309         /* FALLTHROUGH */
 6310         case PROBE_DV_EXIT:
 6311                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6312                         /* Don't wedge the queue */
 6313                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6314                                          /*run_queue*/TRUE);
 6315                 }
 6316                 /*
 6317                  * Do Domain Validation for lun 0 on devices that claim
 6318                  * to support Synchronous Transfer modes.
 6319                  */
 6320                 if (softc->action == PROBE_TUR_FOR_NEGOTIATION
 6321                  && done_ccb->ccb_h.target_lun == 0
 6322                  && (path->device->inq_data.flags & SID_Sync) != 0
 6323                  && (path->device->flags & CAM_DEV_IN_DV) == 0) {
 6324                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 6325                             ("Begin Domain Validation\n"));
 6326                         path->device->flags |= CAM_DEV_IN_DV;
 6327                         xpt_release_ccb(done_ccb);
 6328                         PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV1);
 6329                         xpt_schedule(periph, priority);
 6330                         return;
 6331                 }
 6332                 if (softc->action == PROBE_DV_EXIT) {
 6333                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 6334                             ("Leave Domain Validation\n"));
 6335                 }
 6336                 path->device->flags &=
 6337                     ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
 6338                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
 6339                         /* Inform the XPT that a new device has been found */
 6340                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 6341                         xpt_action(done_ccb);
 6342                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
 6343                                   done_ccb);
 6344                 }
 6345                 xpt_release_ccb(done_ccb);
 6346                 break;
 6347         case PROBE_INQUIRY_BASIC_DV1:
 6348         case PROBE_INQUIRY_BASIC_DV2:
 6349         {
 6350                 struct scsi_inquiry_data *nbuf;
 6351                 struct ccb_scsiio *csio;
 6352 
 6353                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 6354                         /* Don't wedge the queue */
 6355                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
 6356                                          /*run_queue*/TRUE);
 6357                 }
 6358                 csio = &done_ccb->csio;
 6359                 nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
 6360                 if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
 6361                         xpt_print(path,
 6362                             "inquiry data fails comparison at DV%d step\n",
 6363                             softc->action == PROBE_INQUIRY_BASIC_DV1 ? 1 : 2);
 6364                         if (proberequestbackoff(periph, path->device)) {
 6365                                 path->device->flags &= ~CAM_DEV_IN_DV;
 6366                                 PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION);
 6367                         } else {
 6368                                 /* give up */
 6369                                 PROBE_SET_ACTION(softc, PROBE_DV_EXIT);
 6370                         }
 6371                         free(nbuf, M_CAMXPT);
 6372                         xpt_release_ccb(done_ccb);
 6373                         xpt_schedule(periph, priority);
 6374                         return;
 6375                 }
 6376                 free(nbuf, M_CAMXPT);
 6377                 if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
 6378                         PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV2);
 6379                         xpt_release_ccb(done_ccb);
 6380                         xpt_schedule(periph, priority);
 6381                         return;
 6382                 }
 6383                 if (softc->action == PROBE_INQUIRY_BASIC_DV2) {
 6384                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
 6385                             ("Leave Domain Validation Successfully\n"));
 6386                 }
 6387                 path->device->flags &=
 6388                     ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
 6389                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
 6390                         /* Inform the XPT that a new device has been found */
 6391                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 6392                         xpt_action(done_ccb);
 6393                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
 6394                                   done_ccb);
 6395                 }
 6396                 xpt_release_ccb(done_ccb);
 6397                 break;
 6398         }
 6399         case PROBE_INVALID:
 6400                 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_INFO,
 6401                     ("probedone: invalid action state\n"));
 6402         default:
 6403                 break;
 6404         }
 6405         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
 6406         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
 6407         done_ccb->ccb_h.status = CAM_REQ_CMP;
 6408         xpt_done(done_ccb);
 6409         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
 6410                 cam_periph_invalidate(periph);
 6411                 cam_periph_release_locked(periph);
 6412         } else {
 6413                 probeschedule(periph);
 6414         }
 6415 }
 6416 
 6417 static void
 6418 probecleanup(struct cam_periph *periph)
 6419 {
 6420         free(periph->softc, M_CAMXPT);
 6421 }
 6422 
 6423 static void
 6424 xpt_find_quirk(struct cam_ed *device)
 6425 {
 6426         caddr_t match;
 6427 
 6428         match = cam_quirkmatch((caddr_t)&device->inq_data,
 6429                                (caddr_t)xpt_quirk_table,
 6430                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
 6431                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
 6432 
 6433         if (match == NULL)
 6434                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
 6435 
 6436         device->quirk = (struct xpt_quirk_entry *)match;
 6437 }
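
/*
 * Illustrative note: the panic above can only fire if xpt_quirk_table
 * ever loses its terminating catch-all.  The table is expected to end
 * with a wildcard entry shaped roughly like the sketch below; the field
 * values here are assumptions for illustration, not a quote of the
 * real table:
 *
 *	{
 *		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
 *		  "*", "*", "*" },
 *		0, 2, 255
 *	},
 */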
 6438 
 6439 static int
 6440 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
 6441 {
 6442         int error, val;
 6443 
 6444         val = cam_srch_hi;
 6445         error = sysctl_handle_int(oidp, &val, 0, req);
 6446         if (error != 0 || req->newptr == NULL)
 6447                 return (error);
 6448         if (val == 0 || val == 1) {
 6449                 cam_srch_hi = val;
 6450                 return (0);
 6451         } else {
 6452                 return (EINVAL);
 6453         }
 6454 }
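
/*
 * Hedged sketch: a handler like the one above is normally wired up with
 * SYSCTL_PROC.  The OID name and description below are assumptions for
 * illustration, not a quote of this file's actual declaration.
 */
#if 0	/* illustration only */
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");
#endif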
 6455 
 6456 
 6457 static void
 6458 xpt_devise_transport(struct cam_path *path)
 6459 {
 6460         struct ccb_pathinq cpi;
 6461         struct ccb_trans_settings cts;
 6462         struct scsi_inquiry_data *inq_buf;
 6463 
 6464         /* Get transport information from the SIM */
 6465         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
 6466         cpi.ccb_h.func_code = XPT_PATH_INQ;
 6467         xpt_action((union ccb *)&cpi);
 6468 
 6469         inq_buf = NULL;
 6470         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
 6471                 inq_buf = &path->device->inq_data;
 6472         path->device->protocol = PROTO_SCSI;
 6473         path->device->protocol_version =
 6474             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
 6475         path->device->transport = cpi.transport;
 6476         path->device->transport_version = cpi.transport_version;
 6477 
 6478         /*
 6479          * Any device not using SPI3 features should
 6480          * be considered SPI2 or lower.
 6481          */
 6482         if (inq_buf != NULL) {
 6483                 if (path->device->transport == XPORT_SPI
 6484                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
 6485                  && path->device->transport_version > 2)
 6486                         path->device->transport_version = 2;
 6487         } else {
 6488                 struct cam_ed* otherdev;
 6489 
 6490                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
 6491                      otherdev != NULL;
 6492                      otherdev = TAILQ_NEXT(otherdev, links)) {
 6493                         if (otherdev != path->device)
 6494                                 break;
 6495                 }
 6496 
 6497                 if (otherdev != NULL) {
 6498                         /*
 6499                          * Initially assume the same versioning as
 6500                          * prior luns for this target.
 6501                          */
 6502                         path->device->protocol_version =
 6503                             otherdev->protocol_version;
 6504                         path->device->transport_version =
 6505                             otherdev->transport_version;
 6506                 } else {
 6507                         /* Until we know better, opt for safety */
 6508                         path->device->protocol_version = 2;
 6509                         if (path->device->transport == XPORT_SPI)
 6510                                 path->device->transport_version = 2;
 6511                         else
 6512                                 path->device->transport_version = 0;
 6513                 }
 6514         }
 6515 
 6516         /*
 6517          * XXX
 6518          * For a device compliant with SPC-2 we should be able
 6519          * to determine the transport version supported by
 6520          * scrutinizing the version descriptors in the
 6521          * inquiry buffer.
 6522          */
 6523 
 6524         /* Tell the controller what we think */
 6525         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
 6526         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 6527         cts.type = CTS_TYPE_CURRENT_SETTINGS;
 6528         cts.transport = path->device->transport;
 6529         cts.transport_version = path->device->transport_version;
 6530         cts.protocol = path->device->protocol;
 6531         cts.protocol_version = path->device->protocol_version;
 6532         cts.proto_specific.valid = 0;
 6533         cts.xport_specific.valid = 0;
 6534         xpt_action((union ccb *)&cts);
 6535 }
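
/*
 * Worked case for the clamping above (illustrative): a device on a SPI
 * bus whose INQUIRY data reports ANSI revision 3 but sets none of the
 * SID_SPI_* bits in spi3data ends up with protocol_version 3 over
 * transport_version 2, so the SIM is asked to negotiate as if the
 * target were SPI-2 (no PPR-based DT, IU, or QAS options).
 */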
 6536 
 6537 static void
 6538 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
 6539                           int async_update)
 6540 {
 6541         struct  ccb_pathinq cpi;
 6542         struct  ccb_trans_settings cur_cts;
 6543         struct  ccb_trans_settings_scsi *scsi;
 6544         struct  ccb_trans_settings_scsi *cur_scsi;
 6545         struct  cam_sim *sim;
 6546         struct  scsi_inquiry_data *inq_data;
 6547 
 6548         if (device == NULL) {
 6549                 cts->ccb_h.status = CAM_PATH_INVALID;
 6550                 xpt_done((union ccb *)cts);
 6551                 return;
 6552         }
 6553 
 6554         if (cts->protocol == PROTO_UNKNOWN
 6555          || cts->protocol == PROTO_UNSPECIFIED) {
 6556                 cts->protocol = device->protocol;
 6557                 cts->protocol_version = device->protocol_version;
 6558         }
 6559 
 6560         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
 6561          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
 6562                 cts->protocol_version = device->protocol_version;
 6563 
 6564         if (cts->protocol != device->protocol) {
 6565                 xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
 6566                        cts->protocol, device->protocol);
 6567                 cts->protocol = device->protocol;
 6568         }
 6569 
 6570         if (cts->protocol_version > device->protocol_version) {
 6571                 if (bootverbose) {
 6572                         xpt_print(cts->ccb_h.path, "Down revving Protocol "
 6573                             "Version from %d to %d?\n", cts->protocol_version,
 6574                             device->protocol_version);
 6575                 }
 6576                 cts->protocol_version = device->protocol_version;
 6577         }
 6578 
 6579         if (cts->transport == XPORT_UNKNOWN
 6580          || cts->transport == XPORT_UNSPECIFIED) {
 6581                 cts->transport = device->transport;
 6582                 cts->transport_version = device->transport_version;
 6583         }
 6584 
 6585         if (cts->transport_version == XPORT_VERSION_UNKNOWN
 6586          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
 6587                 cts->transport_version = device->transport_version;
 6588 
 6589         if (cts->transport != device->transport) {
 6590                 xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
 6591                     cts->transport, device->transport);
 6592                 cts->transport = device->transport;
 6593         }
 6594 
 6595         if (cts->transport_version > device->transport_version) {
 6596                 if (bootverbose) {
 6597                         xpt_print(cts->ccb_h.path, "Down revving Transport "
 6598                             "Version from %d to %d?\n", cts->transport_version,
 6599                             device->transport_version);
 6600                 }
 6601                 cts->transport_version = device->transport_version;
 6602         }
 6603 
 6604         sim = cts->ccb_h.path->bus->sim;
 6605 
 6606         /*
 6607          * Nothing more of interest to do unless
 6608          * this is a device connected via the
 6609          * SCSI protocol.
 6610          */
 6611         if (cts->protocol != PROTO_SCSI) {
 6612                 if (async_update == FALSE)
 6613                         (*(sim->sim_action))(sim, (union ccb *)cts);
 6614                 return;
 6615         }
 6616 
 6617         inq_data = &device->inq_data;
 6618         scsi = &cts->proto_specific.scsi;
 6619         xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
 6620         cpi.ccb_h.func_code = XPT_PATH_INQ;
 6621         xpt_action((union ccb *)&cpi);
 6622 
 6623         /* SCSI specific sanity checking */
 6624         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
 6625          || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
 6626          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
 6627          || (device->quirk->mintags == 0)) {
 6628                 /*
 6629                  * Can't tag on hardware that doesn't support tags,
 6630                  * doesn't have it enabled, or has broken tag support.
 6631                  */
 6632                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6633         }
 6634 
 6635         if (async_update == FALSE) {
 6636                 /*
 6637                  * Perform sanity checking against what the
 6638                  * controller and device can do.
 6639                  */
 6640                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
 6641                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 6642                 cur_cts.type = cts->type;
 6643                 xpt_action((union ccb *)&cur_cts);
 6644                 if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 6645                         return;
 6646                 }
 6647                 cur_scsi = &cur_cts.proto_specific.scsi;
 6648                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
 6649                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6650                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
 6651                 }
 6652                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
 6653                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6654         }
 6655 
 6656         /* SPI specific sanity checking */
 6657         if (cts->transport == XPORT_SPI && async_update == FALSE) {
 6658                 u_int spi3caps;
 6659                 struct ccb_trans_settings_spi *spi;
 6660                 struct ccb_trans_settings_spi *cur_spi;
 6661 
 6662                 spi = &cts->xport_specific.spi;
 6663 
 6664                 cur_spi = &cur_cts.xport_specific.spi;
 6665 
 6666                 /* Fill in any gaps in what the user gave us */
 6667                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
 6668                         spi->sync_period = cur_spi->sync_period;
 6669                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
 6670                         spi->sync_period = 0;
 6671                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
 6672                         spi->sync_offset = cur_spi->sync_offset;
 6673                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
 6674                         spi->sync_offset = 0;
 6675                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
 6676                         spi->ppr_options = cur_spi->ppr_options;
 6677                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
 6678                         spi->ppr_options = 0;
 6679                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
 6680                         spi->bus_width = cur_spi->bus_width;
 6681                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
 6682                         spi->bus_width = 0;
 6683                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
 6684                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
 6685                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
 6686                 }
 6687                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
 6688                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
 6689                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6690                   && (inq_data->flags & SID_Sync) == 0
 6691                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
 6692                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)) {
 6693                         /* Force async */
 6694                         spi->sync_period = 0;
 6695                         spi->sync_offset = 0;
 6696                 }
 6697 
 6698                 switch (spi->bus_width) {
 6699                 case MSG_EXT_WDTR_BUS_32_BIT:
 6700                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6701                           || (inq_data->flags & SID_WBus32) != 0
 6702                           || cts->type == CTS_TYPE_USER_SETTINGS)
 6703                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
 6704                                 break;
 6705                         /* Fall Through to 16-bit */
 6706                 case MSG_EXT_WDTR_BUS_16_BIT:
 6707                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
 6708                           || (inq_data->flags & SID_WBus16) != 0
 6709                           || cts->type == CTS_TYPE_USER_SETTINGS)
 6710                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
 6711                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
 6712                                 break;
 6713                         }
 6714                         /* Fall Through to 8-bit */
 6715                 default: /* New bus width?? */
 6716                 case MSG_EXT_WDTR_BUS_8_BIT:
 6717                         /* All targets can do this */
 6718                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
 6719                         break;
 6720                 }
 6721 
 6722                 spi3caps = cpi.xport_specific.spi.ppr_options;
 6723                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
 6724                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
 6725                         spi3caps &= inq_data->spi3data;
 6726 
 6727                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
 6728                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
 6729 
 6730                 if ((spi3caps & SID_SPI_IUS) == 0)
 6731                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
 6732 
 6733                 if ((spi3caps & SID_SPI_QAS) == 0)
 6734                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
 6735 
 6736                 /* No SPI Transfer settings are allowed unless we are wide */
 6737                 if (spi->bus_width == 0)
 6738                         spi->ppr_options = 0;
 6739 
 6740                 if ((spi->valid & CTS_SPI_VALID_DISC)
 6741                  && ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0)) {
 6742                         /*
 6743                          * Can't tag queue without disconnection.
 6744                          */
 6745                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
 6746                         scsi->valid |= CTS_SCSI_VALID_TQ;
 6747                 }
 6748 
 6749                 /*
 6750                  * If we are currently performing tagged transactions to
 6751                  * this device and want to change its negotiation parameters,
 6752                  * go non-tagged for a bit to give the controller a chance to
 6753                  * negotiate unhampered by tag messages.
 6754                  */
 6755                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
 6756                  && (device->inq_flags & SID_CmdQue) != 0
 6757                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
 6758                  && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
 6759                                    CTS_SPI_VALID_SYNC_OFFSET|
 6760                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
 6761                         xpt_toggle_tags(cts->ccb_h.path);
 6762         }
 6763 
 6764         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
 6765          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
 6766                 int device_tagenb;
 6767 
 6768                 /*
 6769                  * If we are transitioning from tags to no-tags or
 6770                  * vice-versa, we need to carefully freeze and restart
 6771                  * the queue so that we don't overlap tagged and non-tagged
 6772                  * commands.  We also temporarily stop tags if there is
 6773                  * a change in transfer negotiation settings to allow
 6774                  * "tag-less" negotiation.
 6775                  */
 6776                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6777                  || (device->inq_flags & SID_CmdQue) != 0)
 6778                         device_tagenb = TRUE;
 6779                 else
 6780                         device_tagenb = FALSE;
 6781 
 6782                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
 6783                   && device_tagenb == FALSE)
 6784                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
 6785                   && device_tagenb == TRUE)) {
 6786 
 6787                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
 6788                                 /*
 6789                                  * Delay change to use tags until after a
 6790                                  * few commands have gone to this device so
 6791                                  * the controller has time to perform transfer
 6792                                  * negotiations without tagged messages getting
 6793                                  * in the way.
 6794                                  */
 6795                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
 6796                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
 6797                         } else {
 6798                                 struct ccb_relsim crs;
 6799 
 6800                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
 6801                                 device->inq_flags &= ~SID_CmdQue;
 6802                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
 6803                                                     sim->max_dev_openings);
 6804                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6805                                 device->tag_delay_count = 0;
 6806 
 6807                                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
 6808                                               /*priority*/1);
 6809                                 crs.ccb_h.func_code = XPT_REL_SIMQ;
 6810                                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6811                                 crs.openings
 6812                                     = crs.release_timeout
 6813                                     = crs.qfrozen_cnt
 6814                                     = 0;
 6815                                 xpt_action((union ccb *)&crs);
 6816                         }
 6817                 }
 6818         }
 6819         if (async_update == FALSE)
 6820                 (*(sim->sim_action))(sim, (union ccb *)cts);
 6821 }
 6822 
 6823 
 6824 static void
 6825 xpt_toggle_tags(struct cam_path *path)
 6826 {
 6827         struct cam_ed *dev;
 6828 
 6829         /*
 6830          * Give controllers a chance to renegotiate
 6831          * before starting tag operations.  We
 6832          * "toggle" tagged queuing off then on
 6833          * which causes the tag enable command delay
 6834          * counter to come into effect.
 6835          */
 6836         dev = path->device;
 6837         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 6838          || ((dev->inq_flags & SID_CmdQue) != 0
 6839           && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
 6840                 struct ccb_trans_settings cts;
 6841 
 6842                 xpt_setup_ccb(&cts.ccb_h, path, 1);
 6843                 cts.protocol = PROTO_SCSI;
 6844                 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
 6845                 cts.transport = XPORT_UNSPECIFIED;
 6846                 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
 6847                 cts.proto_specific.scsi.flags = 0;
 6848                 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
 6849                 xpt_set_transfer_settings(&cts, path->device,
 6850                                           /*async_update*/TRUE);
 6851                 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
 6852                 xpt_set_transfer_settings(&cts, path->device,
 6853                                           /*async_update*/TRUE);
 6854         }
 6855 }
 6856 
 6857 static void
 6858 xpt_start_tags(struct cam_path *path)
 6859 {
 6860         struct ccb_relsim crs;
 6861         struct cam_ed *device;
 6862         struct cam_sim *sim;
 6863         int    newopenings;
 6864 
 6865         device = path->device;
 6866         sim = path->bus->sim;
 6867         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
 6868         xpt_freeze_devq(path, /*count*/1);
 6869         device->inq_flags |= SID_CmdQue;
 6870         if (device->tag_saved_openings != 0)
 6871                 newopenings = device->tag_saved_openings;
 6872         else
 6873                 newopenings = min(device->quirk->maxtags,
 6874                                   sim->max_tagged_dev_openings);
 6875         xpt_dev_ccbq_resize(path, newopenings);
 6876         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
 6877         crs.ccb_h.func_code = XPT_REL_SIMQ;
 6878         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
 6879         crs.openings
 6880             = crs.release_timeout
 6881             = crs.qfrozen_cnt
 6882             = 0;
 6883         xpt_action((union ccb *)&crs);
 6884 }
 6885 
 6886 static int busses_to_config;
 6887 static int busses_to_reset;
 6888 
 6889 static int
 6890 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
 6891 {
 6892 
 6893         mtx_assert(bus->sim->mtx, MA_OWNED);
 6894 
 6895         if (bus->path_id != CAM_XPT_PATH_ID) {
 6896                 struct cam_path path;
 6897                 struct ccb_pathinq cpi;
 6898                 int can_negotiate;
 6899 
 6900                 busses_to_config++;
 6901                 xpt_compile_path(&path, NULL, bus->path_id,
 6902                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 6903                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
 6904                 cpi.ccb_h.func_code = XPT_PATH_INQ;
 6905                 xpt_action((union ccb *)&cpi);
 6906                 can_negotiate = cpi.hba_inquiry;
 6907                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
 6908                 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
 6909                  && can_negotiate)
 6910                         busses_to_reset++;
 6911                 xpt_release_path(&path);
 6912         }
 6913 
 6914         return(1);
 6915 }
 6916 
 6917 static int
 6918 xptconfigfunc(struct cam_eb *bus, void *arg)
 6919 {
 6920         struct  cam_path *path;
 6921         union   ccb *work_ccb;
 6922 
 6923         mtx_assert(bus->sim->mtx, MA_OWNED);
 6924 
 6925         if (bus->path_id != CAM_XPT_PATH_ID) {
 6926                 cam_status status;
 6927                 int can_negotiate;
 6928 
 6929                 work_ccb = xpt_alloc_ccb_nowait();
 6930                 if (work_ccb == NULL) {
 6931                         busses_to_config--;
 6932                         xpt_finishconfig(xpt_periph, NULL);
 6933                         return(0);
 6934                 }
 6935                 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
 6936                                               CAM_TARGET_WILDCARD,
 6937                                               CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
 6938                         printf("xptconfigfunc: xpt_create_path failed with "
 6939                                "status %#x for bus %d\n", status, bus->path_id);
 6940                         printf("xptconfigfunc: halting bus configuration\n");
 6941                         xpt_free_ccb(work_ccb);
 6942                         busses_to_config--;
 6943                         xpt_finishconfig(xpt_periph, NULL);
 6944                         return(0);
 6945                 }
 6946                 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
 6947                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
 6948                 xpt_action(work_ccb);
 6949                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
 6950                         printf("xptconfigfunc: CPI failed on bus %d "
 6951                                "with status %d\n", bus->path_id,
 6952                                work_ccb->ccb_h.status);
 6953                         xpt_finishconfig(xpt_periph, work_ccb);
 6954                         return(1);
 6955                 }
 6956 
 6957                 can_negotiate = work_ccb->cpi.hba_inquiry;
 6958                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
 6959                 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
 6960                  && (can_negotiate != 0)) {
 6961                         xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
 6962                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
 6963                         work_ccb->ccb_h.cbfcnp = NULL;
 6964                         CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
 6965                                   ("Resetting Bus\n"));
 6966                         xpt_action(work_ccb);
 6967                         xpt_finishconfig(xpt_periph, work_ccb);
 6968                 } else {
 6969                         /* Act as though we performed a successful BUS RESET */
 6970                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
 6971                         xpt_finishconfig(xpt_periph, work_ccb);
 6972                 }
 6973         }
 6974 
 6975         return(1);
 6976 }
 6977 
 6978 static void
 6979 xpt_config(void *arg)
 6980 {
 6981         /*
 6982          * Now that interrupts are enabled, go find our devices
 6983          */
 6984 
 6985 #ifdef CAMDEBUG
 6986         /* Setup debugging flags and path */
 6987 #ifdef CAM_DEBUG_FLAGS
 6988         cam_dflags = CAM_DEBUG_FLAGS;
 6989 #else /* !CAM_DEBUG_FLAGS */
 6990         cam_dflags = CAM_DEBUG_NONE;
 6991 #endif /* CAM_DEBUG_FLAGS */
 6992 #ifdef CAM_DEBUG_BUS
 6993         if (cam_dflags != CAM_DEBUG_NONE) {
 6994                 /*
 6995                  * Locking is specifically omitted here.  No SIMs have
 6996                  * registered yet, so xpt_create_path will only be searching
 6997                  * empty lists of targets and devices.
 6998                  */
 6999                 if (xpt_create_path(&cam_dpath, xpt_periph,
 7000                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
 7001                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
 7002                         printf("xpt_config: xpt_create_path() failed for debug"
 7003                                " target %d:%d:%d, debugging disabled\n",
 7004                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
 7005                         cam_dflags = CAM_DEBUG_NONE;
 7006                 }
 7007         } else
 7008                 cam_dpath = NULL;
 7009 #else /* !CAM_DEBUG_BUS */
 7010         cam_dpath = NULL;
 7011 #endif /* CAM_DEBUG_BUS */
 7012 #endif /* CAMDEBUG */
 7013 
 7014         /*
 7015          * Scan all installed busses.
 7016          */
 7017         xpt_for_all_busses(xptconfigbuscountfunc, NULL);
 7018 
 7019         if (busses_to_config == 0) {
 7020                 /* Call manually because we don't have any busses */
 7021                 xpt_finishconfig(xpt_periph, NULL);
 7022         } else  {
 7023                 if (busses_to_reset > 0 && scsi_delay >= 2000) {
 7024                         printf("Waiting %d seconds for SCSI "
 7025                                "devices to settle\n", scsi_delay/1000);
 7026                 }
 7027                 xpt_for_all_busses(xptconfigfunc, NULL);
 7028         }
 7029 }
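
/*
 * Note (assumed interface): scsi_delay above is the bus-settle time in
 * milliseconds.  It is conventionally adjusted through the loader
 * tunable/sysctl kern.cam.scsi_delay, e.g. in loader.conf:
 *
 *	kern.cam.scsi_delay="5000"
 */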
 7030 
 7031 /*
 7032  * If the given device only has one peripheral attached to it, and if that
 7033  * peripheral is the passthrough driver, announce it.  This ensures that the
 7034  * user sees some sort of announcement for every peripheral in their system.
 7035  */
 7036 static int
 7037 xptpassannouncefunc(struct cam_ed *device, void *arg)
 7038 {
 7039         struct cam_periph *periph;
 7040         int i;
 7041 
 7042         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
 7043              periph = SLIST_NEXT(periph, periph_links), i++);
 7044 
 7045         periph = SLIST_FIRST(&device->periphs);
 7046         if ((i == 1)
 7047          && (strncmp(periph->periph_name, "pass", 4) == 0))
 7048                 xpt_announce_periph(periph, NULL);
 7049 
 7050         return(1);
 7051 }
 7052 
 7053 static void
 7054 xpt_finishconfig_task(void *context, int pending)
 7055 {
 7056         struct  periph_driver **p_drv;
 7057         int     i;
 7058 
 7059         if (busses_to_config == 0) {
 7060                 /* Register all the peripheral drivers */
 7061                 /* XXX This will have to change when we have loadable modules */
 7062                 p_drv = periph_drivers;
 7063                 for (i = 0; p_drv[i] != NULL; i++) {
 7064                         (*p_drv[i]->init)();
 7065                 }
 7066 
 7067                 /*
 7068                  * Check for devices with no "standard" peripheral driver
 7069                  * attached.  For any devices like that, announce the
 7070                  * passthrough driver so the user will see something.
 7071                  */
 7072                 xpt_for_all_devices(xptpassannouncefunc, NULL);
 7073 
 7074                 /* Release our hook so that the boot can continue. */
 7075                 config_intrhook_disestablish(xsoftc.xpt_config_hook);
 7076                 free(xsoftc.xpt_config_hook, M_CAMXPT);
 7077                 xsoftc.xpt_config_hook = NULL;
 7078         }
 7079 
 7080         free(context, M_CAMXPT);
 7081 }
 7082 
 7083 static void
 7084 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
 7085 {
 7086         struct  xpt_task *task;
 7087 
 7088         if (done_ccb != NULL) {
 7089                 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
 7090                           ("xpt_finishconfig\n"));
 7091                 switch(done_ccb->ccb_h.func_code) {
 7092                 case XPT_RESET_BUS:
 7093                         if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
 7094                                 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
 7095                                 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
 7096                                 done_ccb->crcn.flags = 0;
 7097                                 xpt_action(done_ccb);
 7098                                 return;
 7099                         }
 7100                         /* FALLTHROUGH */
 7101                 case XPT_SCAN_BUS:
 7102                 default:
 7103                         xpt_free_path(done_ccb->ccb_h.path);
 7104                         busses_to_config--;
 7105                         break;
 7106                 }
 7107         }
 7108 
 7109         if (busses_to_config == 0) {
 7110                 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
 7111                 if (task != NULL) {
 7112                         TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
 7113                         taskqueue_enqueue(taskqueue_thread, &task->task);
 7114                 }
 7115         }
 7116 
 7117         if (done_ccb != NULL)
 7118                 xpt_free_ccb(done_ccb);
 7119 }
 7120 
 7121 cam_status
 7122 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
 7123                    struct cam_path *path)
 7124 {
 7125         struct ccb_setasync csa;
 7126         cam_status status;
 7127         int xptpath = 0;
 7128 
 7129         if (path == NULL) {
 7130                 mtx_lock(&xsoftc.xpt_lock);
 7131                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
 7132                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 7133                 if (status != CAM_REQ_CMP) {
 7134                         mtx_unlock(&xsoftc.xpt_lock);
 7135                         return (status);
 7136                 }
 7137                 xptpath = 1;
 7138         }
 7139 
 7140         xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
 7141         csa.ccb_h.func_code = XPT_SASYNC_CB;
 7142         csa.event_enable = event;
 7143         csa.callback = cbfunc;
 7144         csa.callback_arg = cbarg;
 7145         xpt_action((union ccb *)&csa);
 7146         status = csa.ccb_h.status;
 7147         if (xptpath) {
 7148                 xpt_free_path(path);
 7149                 mtx_unlock(&xsoftc.xpt_lock);
 7150         }
 7151         return (status);
 7152 }
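
/*
 * Hedged usage sketch: how a client might register for asynchronous
 * events with the routine above.  "xpt_example_async" and
 * "xpt_example_init" are hypothetical names; only the
 * xpt_register_async() call itself comes from this file.
 */
#if 0	/* illustration only */
static void
xpt_example_async(void *callback_arg, u_int32_t code,
		  struct cam_path *path, void *arg)
{
	if (code == AC_LOST_DEVICE) {
		/* Tear down any per-device state hung off callback_arg. */
	}
}

static void
xpt_example_init(void)
{
	cam_status status;

	/* A NULL path registers against the XPT's own wildcard path. */
	status = xpt_register_async(AC_LOST_DEVICE, xpt_example_async,
				    /*cbarg*/NULL, /*path*/NULL);
	if (status != CAM_REQ_CMP)
		printf("example: async registration failed (%#x)\n", status);
}
#endif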
 7153 
 7154 static void
 7155 xptaction(struct cam_sim *sim, union ccb *work_ccb)
 7156 {
 7157         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 7158 
 7159         switch (work_ccb->ccb_h.func_code) {
 7160         /* Common cases first */
 7161         case XPT_PATH_INQ:              /* Path routing inquiry */
 7162         {
 7163                 struct ccb_pathinq *cpi;
 7164 
 7165                 cpi = &work_ccb->cpi;
 7166                 cpi->version_num = 1; /* XXX??? */
 7167                 cpi->hba_inquiry = 0;
 7168                 cpi->target_sprt = 0;
 7169                 cpi->hba_misc = 0;
 7170                 cpi->hba_eng_cnt = 0;
 7171                 cpi->max_target = 0;
 7172                 cpi->max_lun = 0;
 7173                 cpi->initiator_id = 0;
 7174                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 7175                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
 7176                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 7177                 cpi->unit_number = sim->unit_number;
 7178                 cpi->bus_id = sim->bus_id;
 7179                 cpi->base_transfer_speed = 0;
 7180                 cpi->protocol = PROTO_UNSPECIFIED;
 7181                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
 7182                 cpi->transport = XPORT_UNSPECIFIED;
 7183                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
 7184                 cpi->ccb_h.status = CAM_REQ_CMP;
 7185                 xpt_done(work_ccb);
 7186                 break;
 7187         }
 7188         default:
 7189                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
 7190                 xpt_done(work_ccb);
 7191                 break;
 7192         }
 7193 }
 7194 
 7195 /*
 7196  * The xpt as a "controller" has no interrupt sources, so polling
 7197  * is a no-op.
 7198  */
 7199 static void
 7200 xptpoll(struct cam_sim *sim)
 7201 {
 7202 }
 7203 
 7204 void
 7205 xpt_lock_buses(void)
 7206 {
 7207         mtx_lock(&xsoftc.xpt_topo_lock);
 7208 }
 7209 
 7210 void
 7211 xpt_unlock_buses(void)
 7212 {
 7213         mtx_unlock(&xsoftc.xpt_topo_lock);
 7214 }
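
/*
 * Hedged usage sketch: topology walkers bracket their traversal with
 * the two helpers above.  The list name xsoftc.xpt_busses and its
 * "links" field are assumed from this file's traversal code;
 * "xpt_example_count_busses" is a hypothetical helper.
 */
#if 0	/* illustration only */
static int
xpt_example_count_busses(void)
{
	struct cam_eb *bus;
	int count = 0;

	xpt_lock_buses();
	TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links)
		count++;
	xpt_unlock_buses();
	return (count);
}
#endif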
 7215 
 7216 static void
 7217 camisr(void *dummy)
 7218 {
 7219         cam_simq_t queue;
 7220         struct cam_sim *sim;
 7221 
 7222         mtx_lock(&cam_simq_lock);
 7223         TAILQ_INIT(&queue);
 7224         TAILQ_CONCAT(&queue, &cam_simq, links);
 7225         mtx_unlock(&cam_simq_lock);
 7226 
 7227         while ((sim = TAILQ_FIRST(&queue)) != NULL) {
 7228                 TAILQ_REMOVE(&queue, sim, links);
 7229                 CAM_SIM_LOCK(sim);
 7230                 camisr_runqueue(&sim->sim_doneq);
 7231                 sim->flags &= ~CAM_SIM_ON_DONEQ;
 7232                 CAM_SIM_UNLOCK(sim);
 7233         }
 7234 }
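
/*
 * Context note: the cam_simq list drained above is filled by xpt_done()
 * (earlier in this file), which queues completed CCBs on their SIM's
 * done queue and schedules this software interrupt handler.
 */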
 7235 
 7236 static void
 7237 camisr_runqueue(void *V_queue)
 7238 {
 7239         cam_isrq_t *queue = V_queue;
 7240         struct  ccb_hdr *ccb_h;
 7241 
 7242         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
 7243                 int     runq;
 7244 
 7245                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
 7246                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 7247 
 7248                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
 7249                           ("camisr\n"));
 7250 
 7251                 runq = FALSE;
 7252 
 7253                 if (ccb_h->flags & CAM_HIGH_POWER) {
 7254                         struct highpowerlist    *hphead;
 7255                         union ccb               *send_ccb;
 7256 
 7257                         mtx_lock(&xsoftc.xpt_lock);
 7258                         hphead = &xsoftc.highpowerq;
 7259 
 7260                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
 7261 
 7262                         /*
 7263                          * Increment the count since this command is done.
 7264                          */
 7265                         xsoftc.num_highpower++;
 7266 
 7267                         /*
 7268                          * Any high powered commands queued up?
 7269                          */
 7270                         if (send_ccb != NULL) {
 7271 
 7272                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
 7273                                 mtx_unlock(&xsoftc.xpt_lock);
 7274 
 7275                                 xpt_release_devq(send_ccb->ccb_h.path,
 7276                                                  /*count*/1, /*runqueue*/TRUE);
 7277                         } else
 7278                                 mtx_unlock(&xsoftc.xpt_lock);
 7279                 }
 7280 
 7281                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
 7282                         struct cam_ed *dev;
 7283 
 7284                         dev = ccb_h->path->device;
 7285 
 7286                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
 7287                         ccb_h->path->bus->sim->devq->send_active--;
 7288                         ccb_h->path->bus->sim->devq->send_openings++;
 7289 
 7290                         if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
 7291                           && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
 7292                          || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
 7293                           && (dev->ccbq.dev_active == 0))) {
 7294 
 7295                                 xpt_release_devq(ccb_h->path, /*count*/1,
 7296                                                  /*run_queue*/TRUE);
 7297                         }
 7298 
 7299                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
 7300                          && (--dev->tag_delay_count == 0))
 7301                                 xpt_start_tags(ccb_h->path);
 7302 
 7303                         if ((dev->ccbq.queue.entries > 0)
 7304                          && (dev->qfrozen_cnt == 0)
 7305                          && (device_is_send_queued(dev) == 0)) {
 7306                                 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
 7307                                                               dev);
 7308                         }
 7309                 }
 7310 
 7311                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
 7312                         xpt_release_simq(ccb_h->path->bus->sim,
 7313                                          /*run_queue*/TRUE);
 7314                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
 7315                         runq = FALSE;
 7316                 }
 7317 
 7318                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
 7319                  && (ccb_h->status & CAM_DEV_QFRZN)) {
 7320                         xpt_release_devq(ccb_h->path, /*count*/1,
 7321                                          /*run_queue*/TRUE);
 7322                         ccb_h->status &= ~CAM_DEV_QFRZN;
 7323                 } else if (runq) {
 7324                         xpt_run_dev_sendq(ccb_h->path->bus);
 7325                 }
 7326 
 7327                 /* Call the peripheral driver's callback */
 7328                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
 7329         }
 7330 }
 7331 
