FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_xpt.c
1 /*
2 * Implementation of the Common Access Method Transport (XPT) layer.
3 *
4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD$
30 */
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/types.h>
34 #include <sys/malloc.h>
35 #include <sys/device.h>
36 #include <sys/kernel.h>
37 #include <sys/time.h>
38 #include <sys/conf.h>
39 #include <sys/fcntl.h>
40 #include <sys/md5.h>
41 #include <sys/devicestat.h>
42 #include <sys/interrupt.h>
43
44 #ifdef PC98
45 #include <pc98/pc98/pc98_machdep.h> /* geometry translation */
46 #endif
47
48 #include <machine/clock.h>
49 #include <machine/ipl.h>
50
51 #include <cam/cam.h>
52 #include <cam/cam_conf.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_periph.h>
55 #include <cam/cam_sim.h>
56 #include <cam/cam_xpt.h>
57 #include <cam/cam_xpt_sim.h>
58 #include <cam/cam_xpt_periph.h>
59 #include <cam/cam_debug.h>
60
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
63 #include <cam/scsi/scsi_pass.h>
64 #include "opt_cam.h"
65
66 extern void (*ihandlers[32]) __P((void));
67
68 /* Datastructures internal to the xpt layer */
69
/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;		/* Linkage on owner's async_list */
	u_int32_t	event_enable;	/* Async Event enables */
	/* Invoked for each enabled async event; args is event specific data */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;	/* Opaque context passed as "arg" */
};
81
82 SLIST_HEAD(async_list, async_node);
83 SLIST_HEAD(periph_list, cam_periph);
84 static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
85
86 /*
87 * This is the maximum number of high powered commands (e.g. start unit)
88 * that can be outstanding at a particular time.
89 */
90 #ifndef CAM_MAX_HIGHPOWER
91 #define CAM_MAX_HIGHPOWER 4
92 #endif
93
94 /* number of high powered commands that can go through right now */
95 static int num_highpower = CAM_MAX_HIGHPOWER;
96
97 /*
98 * Structure for queueing a device in a run queue.
99 * There is one run queue for allocating new ccbs,
100 * and another for sending ccbs to the controller.
101 */
102 struct cam_ed_qinfo {
103 cam_pinfo pinfo;
104 struct cam_ed *device;
105 };
106
/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;	/* Linkage on the target's device list */
	struct	cam_ed_qinfo alloc_ccb_entry;	/* Entry on the alloc run queue */
	struct	cam_ed_qinfo send_ccb_entry;	/* Entry on the send run queue */
	struct	cam_et	 *target;	/* Back pointer to our target */
	lun_id_t	 lun_id;	/* LUN this entry represents */
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 *serial_num;	/* Malloc'd serial number, or NULL */
	u_int8_t	 serial_num_len;
	u_int32_t	 qfrozen_cnt;	/* Nested device-queue freeze count */
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED	 	0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 refcount;
	struct		 callout_handle c_handle;	/* Release timeout handle */
};
153
/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;	/* All LUNs on this target */
	TAILQ_ENTRY(cam_et) links;		/* Linkage on the bus's target list */
	struct	cam_eb	*bus;			/* Back pointer to our bus */
	target_id_t	target_id;		/* Target this entry represents */
	u_int32_t	refcount;
	u_int		generation;		/* Bumped when ed_entries changes */
	struct		timeval last_reset;	/* Time of last BDR/target reset */
};
169
/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;	/* All targets on this bus */
	TAILQ_ENTRY(cam_eb)  links;		/* Linkage on xpt_busses */
	path_id_t	     path_id;		/* Bus number assigned by the XPT */
	struct cam_sim	     *sim;		/* SIM driving this bus */
	struct timeval	     last_reset;	/* Time of last bus reset */
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;	/* Bumped when et_entries changes */
};
186
/*
 * A path fully specifies an addressable entity: a (bus, target, lun)
 * triple, optionally owned by a peripheral driver instance.  Wildcard
 * components are represented by NULL pointers at the appropriate level.
 */
struct cam_path {
	struct cam_periph *periph;	/* Owning peripheral, if any */
	struct cam_eb	  *bus;		/* Bus component of the address */
	struct cam_et	  *target;	/* Target component */
	struct cam_ed	  *device;	/* LUN (device) component */
};
193
/*
 * An entry in the device quirk table: an inquiry pattern to match
 * against, plus the behavioral overrides to apply on a match.
 */
struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;	/* Vendor/product/rev pattern */
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01	/* Don't probe LUNs other than 0 */
#define	CAM_QUIRK_NOSERIAL	0x02	/* Don't issue serial number requests */
#define	CAM_QUIRK_HILUNS	0x04
	u_int mintags;		/* Minimum tagged openings, 0 disables TQ */
	u_int maxtags;		/* Maximum tagged openings */
};
#define	CAM_SCSI2_MAXLUN	8
204
205 typedef enum {
206 XPT_FLAG_OPEN = 0x01
207 } xpt_flags;
208
209 struct xpt_softc {
210 xpt_flags flags;
211 u_int32_t generation;
212 #ifdef DEVFS
213 void *xpt_devfs_token;
214 void *ctl_devfs_token;
215 #endif
216 };
217
218 static const char quantum[] = "QUANTUM";
219 static const char sony[] = "SONY";
220 static const char west_digital[] = "WDIGTL";
221 static const char microp[] = "MICROP";
222 static const char samsung[] = "SAMSUNG";
223 static const char seagate[] = "SEAGATE";
224
225 static struct xpt_quirk_entry xpt_quirk_table[] =
226 {
227 {
228 /* Reports QUEUE FULL for temporary resource shortages */
229 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
230 /*quirks*/0, /*mintags*/24, /*maxtags*/32
231 },
232 {
233 /* Reports QUEUE FULL for temporary resource shortages */
234 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
235 /*quirks*/0, /*mintags*/24, /*maxtags*/32
236 },
237 {
238 /* Reports QUEUE FULL for temporary resource shortages */
239 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
240 /*quirks*/0, /*mintags*/24, /*maxtags*/32
241 },
242 {
243 /* Broken tagged queuing drive */
244 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
245 /*quirks*/0, /*mintags*/0, /*maxtags*/
246 },
247 {
248 /* Broken tagged queuing drive */
249 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
250 /*quirks*/0, /*mintags*/0, /*maxtags*/
251 },
252 {
253 /* Broken tagged queuing drive */
254 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
255 /*quirks*/0, /*mintags*/0, /*maxtags*/
256 },
257 {
258 /*
259 * Unfortunately, the Quantum Atlas III has the same
260 * problem as the Atlas II drives above.
261 * Reported by: "Johan Granlund" <johan@granlund.nu>
262 *
263 * For future reference, the drive with the problem was:
264 * QUANTUM QM39100TD-SW N1B0
265 *
266 * It's possible that Quantum will fix the problem in later
267 * firmware revisions. If that happens, the quirk entry
268 * will need to be made specific to the firmware revisions
269 * with the problem.
270 *
271 */
272 /* Reports QUEUE FULL for temporary resource shortages */
273 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
274 /*quirks*/0, /*mintags*/24, /*maxtags*/32
275 },
276 {
277 /*
278 * 18 Gig Atlas III, same problem as the 9G version.
279 * Reported by: Andre Albsmeier
280 * <andre.albsmeier@mchp.siemens.de>
281 *
282 * For future reference, the drive with the problem was:
283 * QUANTUM QM318000TD-S N491
284 */
285 /* Reports QUEUE FULL for temporary resource shortages */
286 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
287 /*quirks*/0, /*mintags*/24, /*maxtags*/32
288 },
289 {
290 /*
291 * Broken tagged queuing drive
292 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
293 * and: Martin Renters <martin@tdc.on.ca>
294 */
295 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
296 /*quirks*/0, /*mintags*/0, /*maxtags*/
297 },
298 /*
299 * The Seagate Medalist Pro drives have very poor write
300 * performance with anything more than 2 tags.
301 *
302 * Reported by: Paul van der Zwan <paulz@trantor.xs4all.nl>
303 * Drive: <SEAGATE ST36530N 1444>
304 *
305 * Reported by: Jeremy Lea <reg@shale.csir.co.za>
306 * Drive: <SEAGATE ST34520W 1281>
307 *
308 * No one has actually reported that the 9G version
309 * (ST39140*) of the Medalist Pro has the same problem, but
310 * we're assuming that it does because the 4G and 6.5G
311 * versions of the drive are broken.
312 */
313 {
314 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
315 /*quirks*/0, /*mintags*/2, /*maxtags*/2
316 },
317 {
318 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
319 /*quirks*/0, /*mintags*/2, /*maxtags*/2
320 },
321 {
322 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
323 /*quirks*/0, /*mintags*/2, /*maxtags*/2
324 },
325 {
326 /*
327 * Slow when tagged queueing is enabled. Write performance
328 * steadily drops off with more and more concurrent
329 * transactions. Best sequential write performance with
330 * tagged queueing turned off and write caching turned on.
331 *
332 * PR: kern/10398
333 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
334 * Drive: DCAS-34330 w/ "S65A" firmware.
335 *
336 * The drive with the problem had the "S65A" firmware
337 * revision, and has also been reported (by Stephen J.
338 * Roznowski <sjr@home.net>) for a drive with the "S61A"
339 * firmware revision.
340 *
341 * Although no one has reported problems with the 2 gig
342 * version of the DCAS drive, the assumption is that it
343 * has the same problems as the 4 gig version. Therefore
344 * this quirk entries disables tagged queueing for all
345 * DCAS drives.
346 */
347 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
348 /*quirks*/0, /*mintags*/0, /*maxtags*/
349 },
350 {
351 /* Broken tagged queuing drive */
352 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
353 /*quirks*/0, /*mintags*/0, /*maxtags*/
354 },
355 {
356 /* Broken tagged queuing drive */
357 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
358 /*quirks*/0, /*mintags*/0, /*maxtags*/
359 },
360 {
361 /*
362 * Broken tagged queuing drive.
363 * Submitted by:
364 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
365 * in PR kern/9535
366 */
367 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
368 /*quirks*/0, /*mintags*/0, /*maxtags*/
369 },
370 {
371 /*
372 * Slow when tagged queueing is enabled. (1.5MB/sec versus
373 * 8MB/sec.)
374 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
375 * Best performance with these drives is achieved with
376 * tagged queueing turned off, and write caching turned on.
377 */
378 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
379 /*quirks*/0, /*mintags*/0, /*maxtags*/
380 },
381 {
382 /*
383 * Slow when tagged queueing is enabled. (1.5MB/sec versus
384 * 8MB/sec.)
385 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
386 * Best performance with these drives is achieved with
387 * tagged queueing turned off, and write caching turned on.
388 */
389 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
390 /*quirks*/0, /*mintags*/0, /*maxtags*/
391 },
392 {
393 /*
394 * Doesn't handle queue full condition correctly,
395 * so we need to limit maxtags to what the device
396 * can handle instead of determining this automatically.
397 */
398 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
399 /*quirks*/0, /*mintags*/2, /*maxtags*/32
400 },
401 {
402 /* Really only one LUN */
403 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA*", "*" },
404 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
405 },
406 {
407 /* I can't believe we need a quirk for DPT volumes. */
408 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
409 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
410 /*mintags*/0, /*maxtags*/255
411 },
412 {
413 /*
414 * Many Sony CDROM drives don't like multi-LUN probing.
415 */
416 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
417 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
418 },
419 {
420 /*
421 * This drive doesn't like multiple LUN probing.
422 * Submitted by: Parag Patel <parag@cgt.com>
423 */
424 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
425 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
426 },
427 {
428 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
429 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
430 },
431 {
432 /*
433 * The 8200 doesn't like multi-lun probing, and probably
434 * don't like serial number requests either.
435 */
436 {
437 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
438 "EXB-8200*", "*"
439 },
440 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
441 },
442 {
443 /*
444 * These Hitachi drives don't like multi-lun probing.
445 * The PR submitter has a DK319H, but says that the Linux
446 * kernel has a similar work-around for the DK312 and DK314,
447 * so all DK31* drives are quirked here.
448 * PR: misc/18793
449 * Submitted by: Paul Haddad <paul@pth.com>
450 */
451 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
452 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
453 },
454 {
455 /*
456 * This old revision of the TDC3600 is also SCSI-1, and
457 * hangs upon serial number probing.
458 */
459 {
460 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
461 " TDC 3600", "U07:"
462 },
463 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/
464 },
465 {
466 /*
467 * Would repond to all LUNs if asked for.
468 */
469 {
470 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
471 "CP150", "*"
472 },
473 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
474 },
475 {
476 /*
477 * Would repond to all LUNs if asked for.
478 */
479 {
480 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
481 "96X2*", "*"
482 },
483 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
484 },
485 {
486 /* Submitted by: Matthew Dodd <winter@jurai.net> */
487 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
488 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
489 },
490 {
491 /* Submitted by: Matthew Dodd <winter@jurai.net> */
492 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
493 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/
494 },
495 {
496 /* Default tagged queuing parameters for all devices */
497 {
498 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
499 /*vendor*/"*", /*product*/"*", /*revision*/"*"
500 },
501 /*quirks*/0, /*mintags*/2, /*maxtags*/255
502 },
503 };
504
505 static const int xpt_quirk_table_size =
506 sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
507
508 typedef enum {
509 DM_RET_COPY = 0x01,
510 DM_RET_FLAG_MASK = 0x0f,
511 DM_RET_NONE = 0x00,
512 DM_RET_STOP = 0x10,
513 DM_RET_DESCEND = 0x20,
514 DM_RET_ERROR = 0x30,
515 DM_RET_ACTION_MASK = 0xf0
516 } dev_match_ret;
517
518 typedef enum {
519 XPT_DEPTH_BUS,
520 XPT_DEPTH_TARGET,
521 XPT_DEPTH_DEVICE,
522 XPT_DEPTH_PERIPH
523 } xpt_traverse_depth;
524
525 struct xpt_traverse_config {
526 xpt_traverse_depth depth;
527 void *tr_func;
528 void *tr_arg;
529 };
530
531 typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg);
532 typedef int xpt_targetfunc_t (struct cam_et *target, void *arg);
533 typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg);
534 typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg);
535 typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
536
537 /* Transport layer configuration information */
538 static struct xpt_softc xsoftc;
539
540 /* Queues for our software interrupt handler */
541 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
542 static cam_isrq_t cam_bioq;
543 static cam_isrq_t cam_netq;
544
545 /* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
546 static SLIST_HEAD(,ccb_hdr) ccb_freeq;
547 static u_int xpt_max_ccbs; /*
548 * Maximum size of ccb pool. Modified as
549 * devices are added/removed or have their
550 * opening counts changed.
551 */
552 static u_int xpt_ccb_count; /* Current count of allocated ccbs */
553
554 static struct cam_periph *xpt_periph;
555
556 static periph_init_t xpt_periph_init;
557
558 static periph_init_t probe_periph_init;
559
560 static struct periph_driver xpt_driver =
561 {
562 xpt_periph_init, "xpt",
563 TAILQ_HEAD_INITIALIZER(xpt_driver.units)
564 };
565
566 static struct periph_driver probe_driver =
567 {
568 probe_periph_init, "probe",
569 TAILQ_HEAD_INITIALIZER(probe_driver.units)
570 };
571
572 DATA_SET(periphdriver_set, xpt_driver);
573 DATA_SET(periphdriver_set, probe_driver);
574
575 #define XPT_CDEV_MAJOR 104
576
577 static d_open_t xptopen;
578 static d_close_t xptclose;
579 static d_ioctl_t xptioctl;
580
581 static struct cdevsw xpt_cdevsw =
582 {
583 /*d_open*/ xptopen,
584 /*d_close*/ xptclose,
585 /*d_read*/ noread,
586 /*d_write*/ nowrite,
587 /*d_ioctl*/ xptioctl,
588 /*d_stop*/ nostop,
589 /*d_reset*/ noreset,
590 /*d_devtotty*/ nodevtotty,
591 /*d_poll*/ NULL,
592 /*d_mmap*/ nommap,
593 /*d_strategy*/ nostrategy,
594 /*d_name*/ "xpt",
595 /*d_spare*/ NULL,
596 /*d_maj*/ -1,
597 /*d_dump*/ nodump,
598 /*d_psize*/ nopsize,
599 /*d_flags*/ 0,
600 /*d_maxio*/ 0,
601 /*b_maj*/ -1
602 };
603
604 static struct intr_config_hook *xpt_config_hook;
605
606 /* Registered busses */
607 static TAILQ_HEAD(,cam_eb) xpt_busses;
608 static u_int bus_generation;
609
610 /* Storage for debugging datastructures */
611 #ifdef CAMDEBUG
612 struct cam_path *cam_dpath;
613 u_int32_t cam_dflags;
614 #endif
615
616 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
617 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
618 #endif
619
620 /*
621 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
622 * enabled. Also, the user must have either none, or all of CAM_DEBUG_BUS,
623 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
624 */
625 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
626 || defined(CAM_DEBUG_LUN)
627 #ifdef CAMDEBUG
628 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
629 || !defined(CAM_DEBUG_LUN)
630 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
631 and CAM_DEBUG_LUN"
632 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
633 #else /* !CAMDEBUG */
634 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
635 #endif /* CAMDEBUG */
636 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
637
638 /* Our boot-time initialization hook */
639 static void xpt_init(void *);
640 SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);
641
642 static cam_status xpt_compile_path(struct cam_path *new_path,
643 struct cam_periph *perph,
644 path_id_t path_id,
645 target_id_t target_id,
646 lun_id_t lun_id);
647
648 static void xpt_release_path(struct cam_path *path);
649
650 static void xpt_async_bcast(struct async_list *async_head,
651 u_int32_t async_code,
652 struct cam_path *path,
653 void *async_arg);
654 static int xptnextfreebus(path_id_t startbus);
655 static int xptpathid(const char *sim_name, int sim_unit, int sim_bus,
656 path_id_t *nextpath);
657 static union ccb *xpt_get_ccb(struct cam_ed *device);
658 static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
659 u_int32_t new_priority);
660 static void xpt_run_dev_allocq(struct cam_eb *bus);
661 static void xpt_run_dev_sendq(struct cam_eb *bus);
662 static timeout_t xpt_release_devq_timeout;
663 static timeout_t xpt_release_simq_timeout;
664 static void xpt_release_bus(struct cam_eb *bus);
665 static struct cam_et*
666 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
667 static void xpt_release_target(struct cam_eb *bus, struct cam_et *target);
668 static struct cam_ed*
669 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
670 lun_id_t lun_id);
671 static void xpt_release_device(struct cam_eb *bus, struct cam_et *target,
672 struct cam_ed *device);
673 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
674 static struct cam_eb*
675 xpt_find_bus(path_id_t path_id);
676 static struct cam_et*
677 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
678 static struct cam_ed*
679 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
680 static void xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
681 static void xpt_scan_lun(struct cam_periph *periph,
682 struct cam_path *path, cam_flags flags,
683 union ccb *ccb);
684 static void xptscandone(struct cam_periph *periph, union ccb *done_ccb);
685 static xpt_busfunc_t xptconfigbuscountfunc;
686 static xpt_busfunc_t xptconfigfunc;
687 static void xpt_config(void *arg);
688 static xpt_devicefunc_t xptpassannouncefunc;
689 static void xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
690 static void xptaction(struct cam_sim *sim, union ccb *work_ccb);
691 static swihand_t swi_camnet;
692 static swihand_t swi_cambio;
693 static void camisr(cam_isrq_t *queue);
694 #if 0
695 static void xptstart(struct cam_periph *periph, union ccb *work_ccb);
696 static void xptasync(struct cam_periph *periph,
697 u_int32_t code, cam_path *path);
698 #endif
699 static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns,
700 int num_patterns, struct cam_eb *bus);
701 static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns,
702 int num_patterns, struct cam_ed *device);
703 static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns,
704 int num_patterns,
705 struct cam_periph *periph);
706 static xpt_busfunc_t xptedtbusfunc;
707 static xpt_targetfunc_t xptedttargetfunc;
708 static xpt_devicefunc_t xptedtdevicefunc;
709 static xpt_periphfunc_t xptedtperiphfunc;
710 static xpt_pdrvfunc_t xptplistpdrvfunc;
711 static xpt_periphfunc_t xptplistperiphfunc;
712 static int xptedtmatch(struct ccb_dev_match *cdm);
713 static int xptperiphlistmatch(struct ccb_dev_match *cdm);
714 static int xptbustraverse(struct cam_eb *start_bus,
715 xpt_busfunc_t *tr_func, void *arg);
716 static int xpttargettraverse(struct cam_eb *bus,
717 struct cam_et *start_target,
718 xpt_targetfunc_t *tr_func, void *arg);
719 static int xptdevicetraverse(struct cam_et *target,
720 struct cam_ed *start_device,
721 xpt_devicefunc_t *tr_func, void *arg);
722 static int xptperiphtraverse(struct cam_ed *device,
723 struct cam_periph *start_periph,
724 xpt_periphfunc_t *tr_func, void *arg);
725 static int xptpdrvtraverse(struct periph_driver **start_pdrv,
726 xpt_pdrvfunc_t *tr_func, void *arg);
727 static int xptpdperiphtraverse(struct periph_driver **pdrv,
728 struct cam_periph *start_periph,
729 xpt_periphfunc_t *tr_func,
730 void *arg);
731 static xpt_busfunc_t xptdefbusfunc;
732 static xpt_targetfunc_t xptdeftargetfunc;
733 static xpt_devicefunc_t xptdefdevicefunc;
734 static xpt_periphfunc_t xptdefperiphfunc;
735 static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
736 #ifdef notusedyet
737 static int xpt_for_all_targets(xpt_targetfunc_t *tr_func,
738 void *arg);
739 #endif
740 static int xpt_for_all_devices(xpt_devicefunc_t *tr_func,
741 void *arg);
742 #ifdef notusedyet
743 static int xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
744 void *arg);
745 #endif
746 static xpt_devicefunc_t xptsetasyncfunc;
747 static xpt_busfunc_t xptsetasyncbusfunc;
748 static cam_status xptregister(struct cam_periph *periph,
749 void *arg);
750 static cam_status proberegister(struct cam_periph *periph,
751 void *arg);
752 static void probeschedule(struct cam_periph *probe_periph);
753 static void probestart(struct cam_periph *periph, union ccb *start_ccb);
754 static void proberequestdefaultnegotiation(struct cam_periph *periph);
755 static void probedone(struct cam_periph *periph, union ccb *done_ccb);
756 static void probecleanup(struct cam_periph *periph);
757 static void xpt_find_quirk(struct cam_ed *device);
758 static void xpt_set_transfer_settings(struct ccb_trans_settings *cts,
759 struct cam_ed *device,
760 int async_update);
761 static void xpt_toggle_tags(struct cam_path *path);
762 static void xpt_start_tags(struct cam_path *path);
763 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
764 struct cam_ed *dev);
765 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
766 struct cam_ed *dev);
767 static __inline int periph_is_queued(struct cam_periph *periph);
768 static __inline int device_is_alloc_queued(struct cam_ed *device);
769 static __inline int device_is_send_queued(struct cam_ed *device);
770 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
771
772 static __inline int
773 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
774 {
775 int retval;
776
777 if (dev->ccbq.devq_openings > 0) {
778 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
779 cam_ccbq_resize(&dev->ccbq,
780 dev->ccbq.dev_openings
781 + dev->ccbq.dev_active);
782 dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
783 }
784 /*
785 * The priority of a device waiting for CCB resources
786 * is that of the the highest priority peripheral driver
787 * enqueued.
788 */
789 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
790 &dev->alloc_ccb_entry.pinfo,
791 CAMQ_GET_HEAD(&dev->drvq)->priority);
792 } else {
793 retval = 0;
794 }
795
796 return (retval);
797 }
798
799 static __inline int
800 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
801 {
802 int retval;
803
804 if (dev->ccbq.dev_openings > 0) {
805 /*
806 * The priority of a device waiting for controller
807 * resources is that of the the highest priority CCB
808 * enqueued.
809 */
810 retval =
811 xpt_schedule_dev(&bus->sim->devq->send_queue,
812 &dev->send_ccb_entry.pinfo,
813 CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
814 } else {
815 retval = 0;
816 }
817 return (retval);
818 }
819
820 static __inline int
821 periph_is_queued(struct cam_periph *periph)
822 {
823 return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
824 }
825
826 static __inline int
827 device_is_alloc_queued(struct cam_ed *device)
828 {
829 return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
830 }
831
832 static __inline int
833 device_is_send_queued(struct cam_ed *device)
834 {
835 return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
836 }
837
838 static __inline int
839 dev_allocq_is_runnable(struct cam_devq *devq)
840 {
841 /*
842 * Have work to do.
843 * Have space to do more work.
844 * Allowed to do work.
845 */
846 return ((devq->alloc_queue.qfrozen_cnt == 0)
847 && (devq->alloc_queue.entries > 0)
848 && (devq->alloc_openings > 0));
849 }
850
851 static void
852 xpt_periph_init()
853 {
854 dev_t dev;
855
856 dev = makedev(XPT_CDEV_MAJOR, 0);
857 cdevsw_add(&dev, &xpt_cdevsw, NULL);
858 }
859
/*
 * Peripheral-driver init hook for the internal "probe" driver.
 * Nothing to do, but the periph_driver registration requires a hook.
 *
 * Fixed: "()" (unspecified argument list) replaced with "(void)".
 */
static void
probe_periph_init(void)
{
}
864
865
/*
 * Completion callback for CCBs issued synchronously by the XPT ioctl
 * paths: wake the thread sleeping on the CCB's callback pointer.
 * The CCB itself is released by the caller, not here.
 */
static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}
872
873 static int
874 xptopen(dev_t dev, int flags, int fmt, struct proc *p)
875 {
876 int unit;
877
878 unit = minor(dev) & 0xff;
879
880 /*
881 * Only allow read-write access.
882 */
883 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
884 return(EPERM);
885
886 /*
887 * We don't allow nonblocking access.
888 */
889 if ((flags & O_NONBLOCK) != 0) {
890 printf("xpt%d: can't do nonblocking accesss\n", unit);
891 return(ENODEV);
892 }
893
894 /*
895 * We only have one transport layer right now. If someone accesses
896 * us via something other than minor number 1, point out their
897 * mistake.
898 */
899 if (unit != 0) {
900 printf("xptopen: got invalid xpt unit %d\n", unit);
901 return(ENXIO);
902 }
903
904 /* Mark ourselves open */
905 xsoftc.flags |= XPT_FLAG_OPEN;
906
907 return(0);
908 }
909
/*
 * Close entry point for the xpt control device: clear the open flag.
 * Returns ENXIO for any unit other than 0, otherwise 0.
 */
static int
xptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}
932
933 static int
934 xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
935 {
936 int unit, error;
937
938 error = 0;
939 unit = minor(dev) & 0xff;
940
941 /*
942 * We only have one transport layer right now. If someone accesses
943 * us via something other than minor number 1, point out their
944 * mistake.
945 */
946 if (unit != 0) {
947 printf("xptioctl: got invalid xpt unit %d\n", unit);
948 return(ENXIO);
949 }
950
951 switch(cmd) {
952 /*
953 * For the transport layer CAMIOCOMMAND ioctl, we really only want
954 * to accept CCB types that don't quite make sense to send through a
955 * passthrough driver.
956 */
957 case CAMIOCOMMAND: {
958 union ccb *ccb;
959 union ccb *inccb;
960
961 inccb = (union ccb *)addr;
962
963 switch(inccb->ccb_h.func_code) {
964 case XPT_SCAN_BUS:
965 case XPT_RESET_BUS:
966 if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
967 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
968 error = EINVAL;
969 break;
970 }
971 /* FALLTHROUGH */
972 case XPT_SCAN_LUN:
973 case XPT_ENG_INQ: /* XXX not implemented yet */
974 case XPT_ENG_EXEC:
975
976 ccb = xpt_alloc_ccb();
977
978 /*
979 * Create a path using the bus, target, and lun the
980 * user passed in.
981 */
982 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
983 inccb->ccb_h.path_id,
984 inccb->ccb_h.target_id,
985 inccb->ccb_h.target_lun) !=
986 CAM_REQ_CMP){
987 error = EINVAL;
988 xpt_free_ccb(ccb);
989 break;
990 }
991 /* Ensure all of our fields are correct */
992 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
993 inccb->ccb_h.pinfo.priority);
994 xpt_merge_ccb(ccb, inccb);
995 ccb->ccb_h.cbfcnp = xptdone;
996 cam_periph_runccb(ccb, NULL, 0, 0, NULL);
997 bcopy(ccb, inccb, sizeof(union ccb));
998 xpt_free_path(ccb->ccb_h.path);
999 xpt_free_ccb(ccb);
1000 break;
1001
1002 case XPT_DEBUG: {
1003 union ccb ccb;
1004
1005 /*
1006 * This is an immediate CCB, so it's okay to
1007 * allocate it on the stack.
1008 */
1009
1010 /*
1011 * Create a path using the bus, target, and lun the
1012 * user passed in.
1013 */
1014 if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1015 inccb->ccb_h.path_id,
1016 inccb->ccb_h.target_id,
1017 inccb->ccb_h.target_lun) !=
1018 CAM_REQ_CMP){
1019 error = EINVAL;
1020 break;
1021 }
1022 /* Ensure all of our fields are correct */
1023 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1024 inccb->ccb_h.pinfo.priority);
1025 xpt_merge_ccb(&ccb, inccb);
1026 ccb.ccb_h.cbfcnp = xptdone;
1027 xpt_action(&ccb);
1028 bcopy(&ccb, inccb, sizeof(union ccb));
1029 xpt_free_path(ccb.ccb_h.path);
1030 break;
1031
1032 }
1033 case XPT_DEV_MATCH: {
1034 struct cam_periph_map_info mapinfo;
1035 struct cam_path *old_path;
1036
1037 /*
1038 * We can't deal with physical addresses for this
1039 * type of transaction.
1040 */
1041 if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1042 error = EINVAL;
1043 break;
1044 }
1045
1046 /*
1047 * Save this in case the caller had it set to
1048 * something in particular.
1049 */
1050 old_path = inccb->ccb_h.path;
1051
1052 /*
1053 * We really don't need a path for the matching
1054 * code. The path is needed because of the
1055 * debugging statements in xpt_action(). They
1056 * assume that the CCB has a valid path.
1057 */
1058 inccb->ccb_h.path = xpt_periph->path;
1059
1060 bzero(&mapinfo, sizeof(mapinfo));
1061
1062 /*
1063 * Map the pattern and match buffers into kernel
1064 * virtual address space.
1065 */
1066 error = cam_periph_mapmem(inccb, &mapinfo);
1067
1068 if (error) {
1069 inccb->ccb_h.path = old_path;
1070 break;
1071 }
1072
1073 /*
1074 * This is an immediate CCB, we can send it on directly.
1075 */
1076 xpt_action(inccb);
1077
1078 /*
1079 * Map the buffers back into user space.
1080 */
1081 cam_periph_unmapmem(inccb, &mapinfo);
1082
1083 inccb->ccb_h.path = old_path;
1084
1085 error = 0;
1086 break;
1087 }
1088 default:
1089 error = EINVAL;
1090 break;
1091 }
1092 break;
1093 }
1094 /*
1095 * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
1096 * with the periphal driver name and unit name filled in. The other
1097 * fields don't really matter as input. The passthrough driver name
1098 * ("pass"), and unit number are passed back in the ccb. The current
1099 * device generation number, and the index into the device peripheral
1100 * driver list, and the status are also passed back. Note that
1101 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1102 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is
1103 * (or rather should be) impossible for the device peripheral driver
1104 * list to change since we look at the whole thing in one pass, and
1105 * we do it with splcam protection.
1106 *
1107 */
1108 case CAMGETPASSTHRU: {
1109 union ccb *ccb;
1110 struct cam_periph *periph;
1111 struct periph_driver **p_drv;
1112 char *name;
1113 int unit;
1114 int cur_generation;
1115 int base_periph_found;
1116 int splbreaknum;
1117 int s;
1118
1119 ccb = (union ccb *)addr;
1120 unit = ccb->cgdl.unit_number;
1121 name = ccb->cgdl.periph_name;
1122 /*
1123 * Every 100 devices, we want to drop our spl protection to
1124 * give the software interrupt handler a chance to run.
1125 * Most systems won't run into this check, but this should
1126 * avoid starvation in the software interrupt handler in
1127 * large systems.
1128 */
1129 splbreaknum = 100;
1130
1131 ccb = (union ccb *)addr;
1132
1133 base_periph_found = 0;
1134
1135 /*
1136 * Sanity check -- make sure we don't get a null peripheral
1137 * driver name.
1138 */
1139 if (*ccb->cgdl.periph_name == '\0') {
1140 error = EINVAL;
1141 break;
1142 }
1143
1144 /* Keep the list from changing while we traverse it */
1145 s = splcam();
1146 ptstartover:
1147 cur_generation = xsoftc.generation;
1148
1149 /* first find our driver in the list of drivers */
1150 for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
1151 *p_drv != NULL; p_drv++)
1152 if (strcmp((*p_drv)->driver_name, name) == 0)
1153 break;
1154
1155 if (*p_drv == NULL) {
1156 splx(s);
1157 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1158 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1159 *ccb->cgdl.periph_name = '\0';
1160 ccb->cgdl.unit_number = 0;
1161 error = ENOENT;
1162 break;
1163 }
1164
1165 /*
1166 * Run through every peripheral instance of this driver
1167 * and check to see whether it matches the unit passed
1168 * in by the user. If it does, get out of the loops and
1169 * find the passthrough driver associated with that
1170 * peripheral driver.
1171 */
1172 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1173 periph = TAILQ_NEXT(periph, unit_links)) {
1174
1175 if (periph->unit_number == unit) {
1176 break;
1177 } else if (--splbreaknum == 0) {
1178 splx(s);
1179 s = splcam();
1180 splbreaknum = 100;
1181 if (cur_generation != xsoftc.generation)
1182 goto ptstartover;
1183 }
1184 }
1185 /*
1186 * If we found the peripheral driver that the user passed
1187 * in, go through all of the peripheral drivers for that
1188 * particular device and look for a passthrough driver.
1189 */
1190 if (periph != NULL) {
1191 struct cam_ed *device;
1192 int i;
1193
1194 base_periph_found = 1;
1195 device = periph->path->device;
1196 for (i = 0, periph = device->periphs.slh_first;
1197 periph != NULL;
1198 periph = periph->periph_links.sle_next, i++) {
1199 /*
1200 * Check to see whether we have a
1201 * passthrough device or not.
1202 */
1203 if (strcmp(periph->periph_name, "pass") == 0) {
1204 /*
1205 * Fill in the getdevlist fields.
1206 */
1207 strcpy(ccb->cgdl.periph_name,
1208 periph->periph_name);
1209 ccb->cgdl.unit_number =
1210 periph->unit_number;
1211 if (periph->periph_links.sle_next)
1212 ccb->cgdl.status =
1213 CAM_GDEVLIST_MORE_DEVS;
1214 else
1215 ccb->cgdl.status =
1216 CAM_GDEVLIST_LAST_DEVICE;
1217 ccb->cgdl.generation =
1218 device->generation;
1219 ccb->cgdl.index = i;
1220 /*
1221 * Fill in some CCB header fields
1222 * that the user may want.
1223 */
1224 ccb->ccb_h.path_id =
1225 periph->path->bus->path_id;
1226 ccb->ccb_h.target_id =
1227 periph->path->target->target_id;
1228 ccb->ccb_h.target_lun =
1229 periph->path->device->lun_id;
1230 ccb->ccb_h.status = CAM_REQ_CMP;
1231 break;
1232 }
1233 }
1234 }
1235
1236 /*
1237 * If the periph is null here, one of two things has
1238 * happened. The first possibility is that we couldn't
1239 * find the unit number of the particular peripheral driver
1240 * that the user is asking about. e.g. the user asks for
1241 * the passthrough driver for "da11". We find the list of
1242 * "da" peripherals all right, but there is no unit 11.
1243 * The other possibility is that we went through the list
1244 * of peripheral drivers attached to the device structure,
1245 * but didn't find one with the name "pass". Either way,
1246 * we return ENOENT, since we couldn't find something.
1247 */
1248 if (periph == NULL) {
1249 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1250 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1251 *ccb->cgdl.periph_name = '\0';
1252 ccb->cgdl.unit_number = 0;
1253 error = ENOENT;
1254 /*
1255 * It is unfortunate that this is even necessary,
1256 * but there are many, many clueless users out there.
1257 * If this is true, the user is looking for the
1258 * passthrough driver, but doesn't have one in his
1259 * kernel.
1260 */
1261 if (base_periph_found == 1) {
1262 printf("xptioctl: pass driver is not in the "
1263 "kernel\n");
1264 printf("xptioctl: put \"device pass0\" in "
1265 "your kernel config file\n");
1266 }
1267 }
1268 splx(s);
1269 break;
1270 }
1271 default:
1272 error = ENOTTY;
1273 break;
1274 }
1275
1276 return(error);
1277 }
1278
1279 /* Functions accessed by the peripheral drivers */
1280 static void
1281 xpt_init(dummy)
1282 void *dummy;
1283 {
1284 struct cam_sim *xpt_sim;
1285 struct cam_path *path;
1286 struct cam_devq;
1287 cam_status status;
1288
1289 TAILQ_INIT(&xpt_busses);
1290 TAILQ_INIT(&cam_bioq);
1291 TAILQ_INIT(&cam_netq);
1292 SLIST_INIT(&ccb_freeq);
1293 STAILQ_INIT(&highpowerq);
1294
1295 /*
1296 * The xpt layer is, itself, the equivelent of a SIM.
1297 * Allow 16 ccbs in the ccb pool for it. This should
1298 * give decent parallelism when we probe busses and
1299 * perform other XPT functions.
1300 */
1301 xpt_sim = (struct cam_sim *)malloc(sizeof(*xpt_sim),
1302 M_DEVBUF, M_WAITOK);
1303 xpt_sim->sim_action = xptaction;
1304 xpt_sim->sim_name = "xpt";
1305 xpt_sim->path_id = CAM_XPT_PATH_ID;
1306 xpt_sim->bus_id = 0;
1307 xpt_sim->max_tagged_dev_openings = 0;
1308 xpt_sim->max_dev_openings = 0;
1309 xpt_sim->devq = cam_simq_alloc(16);
1310 xpt_max_ccbs = 16;
1311
1312 xpt_bus_register(xpt_sim, 0);
1313
1314 /*
1315 * Looking at the XPT from the SIM layer, the XPT is
1316 * the equivelent of a peripheral driver. Allocate
1317 * a peripheral driver entry for us.
1318 */
1319 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1320 CAM_TARGET_WILDCARD,
1321 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1322 printf("xpt_init: xpt_create_path failed with status %#x,"
1323 " failing attach\n", status);
1324 return;
1325 }
1326
1327 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1328 path, NULL, 0, NULL);
1329 xpt_free_path(path);
1330
1331 xpt_sim->softc = xpt_periph;
1332
1333 /*
1334 * Register a callback for when interrupts are enabled.
1335 */
1336 xpt_config_hook =
1337 (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
1338 M_TEMP, M_NOWAIT);
1339 if (xpt_config_hook == NULL) {
1340 printf("xpt_init: Cannot malloc config hook "
1341 "- failing attach\n");
1342 return;
1343 }
1344 bzero(xpt_config_hook, sizeof(*xpt_config_hook));
1345
1346 xpt_config_hook->ich_func = xpt_config;
1347 if (config_intrhook_establish(xpt_config_hook) != 0) {
1348 free (xpt_config_hook, M_TEMP);
1349 printf("xpt_init: config_intrhook_establish failed "
1350 "- failing attach\n");
1351 }
1352
1353 /* Install our software interrupt handlers */
1354 register_swi(SWI_CAMNET, swi_camnet);
1355 register_swi(SWI_CAMBIO, swi_cambio);
1356 }
1357
1358 static cam_status
1359 xptregister(struct cam_periph *periph, void *arg)
1360 {
1361 if (periph == NULL) {
1362 printf("xptregister: periph was NULL!!\n");
1363 return(CAM_REQ_CMP_ERR);
1364 }
1365
1366 periph->softc = NULL;
1367
1368 xpt_periph = periph;
1369
1370 return(CAM_REQ_CMP);
1371 }
1372
1373 int32_t
1374 xpt_add_periph(struct cam_periph *periph)
1375 {
1376 struct cam_ed *device;
1377 int32_t status;
1378 struct periph_list *periph_head;
1379
1380 device = periph->path->device;
1381
1382 periph_head = &device->periphs;
1383
1384 status = CAM_REQ_CMP;
1385
1386 if (device != NULL) {
1387 int s;
1388
1389 /*
1390 * Make room for this peripheral
1391 * so it will fit in the queue
1392 * when it's scheduled to run
1393 */
1394 s = splsoftcam();
1395 status = camq_resize(&device->drvq,
1396 device->drvq.array_size + 1);
1397
1398 device->generation++;
1399
1400 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1401
1402 splx(s);
1403 }
1404
1405 xsoftc.generation++;
1406
1407 return (status);
1408 }
1409
1410 void
1411 xpt_remove_periph(struct cam_periph *periph)
1412 {
1413 struct cam_ed *device;
1414
1415 device = periph->path->device;
1416
1417 if (device != NULL) {
1418 int s;
1419 struct periph_list *periph_head;
1420
1421 periph_head = &device->periphs;
1422
1423 /* Release the slot for this peripheral */
1424 s = splsoftcam();
1425 camq_resize(&device->drvq, device->drvq.array_size - 1);
1426
1427 device->generation++;
1428
1429 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1430
1431 splx(s);
1432 }
1433
1434 xsoftc.generation++;
1435
1436 }
1437
/*
 * Print the standard attach-time announcement for a peripheral:
 * the "foo0 at bar0 bus B target T lun L" line, the inquiry data,
 * the negotiated transfer speed/width/offset, tagged queueing state,
 * and finally the caller's optional announce_string.
 */
void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	int s;
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen  - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	/* Ask the transport for the current (negotiated) settings. */
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;	/* transfer rate in KB/s */
		u_int freq;	/* sync clock in kHz; 0 when async */

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			struct ccb_pathinq cpi;

			/* Ask the SIM for its base transfer speed */
			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
			cpi.ccb_h.func_code = XPT_PATH_INQ;
			xpt_action((union ccb *)&cpi);

			speed = cpi.base_transfer_speed;
			freq = 0;
		}
		/* A wide bus multiplies the per-cycle transfer rate. */
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			printf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			printf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, (speed % 1000) * 1000);
		/*
		 * The parenthesized detail below is opened by either the
		 * sync clause or the width clause, whichever prints first,
		 * and closed by whichever prints last.
		 */
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			printf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			printf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			printf(", Tagged Queueing Enabled");
		}

		printf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
	        || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}
1540
1541
/*
 * Match a single bus against any number of match patterns.
 *
 * Returns a dev_match_ret with DM_RET_COPY set if the bus should be
 * copied to the user, and an action of DM_RET_DESCEND, DM_RET_STOP,
 * or DM_RET_ERROR telling the traversal code what to do next.
 */
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		/* Each requested field must match or the pattern fails. */
		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
1653
1654 static dev_match_ret
1655 xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
1656 struct cam_ed *device)
1657 {
1658 dev_match_ret retval;
1659 int i;
1660
1661 retval = DM_RET_NONE;
1662
1663 /*
1664 * If we aren't given something to match against, that's an error.
1665 */
1666 if (device == NULL)
1667 return(DM_RET_ERROR);
1668
1669 /*
1670 * If there are no match entries, then this device matches no
1671 * matter what.
1672 */
1673 if ((patterns == NULL) || (patterns == 0))
1674 return(DM_RET_DESCEND | DM_RET_COPY);
1675
1676 for (i = 0; i < num_patterns; i++) {
1677 struct device_match_pattern *cur_pattern;
1678
1679 /*
1680 * If the pattern in question isn't for a device node, we
1681 * aren't interested.
1682 */
1683 if (patterns[i].type != DEV_MATCH_DEVICE) {
1684 if ((patterns[i].type == DEV_MATCH_PERIPH)
1685 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1686 retval |= DM_RET_DESCEND;
1687 continue;
1688 }
1689
1690 cur_pattern = &patterns[i].pattern.device_pattern;
1691
1692 /*
1693 * If they want to match any device node, we give them any
1694 * device node.
1695 */
1696 if (cur_pattern->flags == DEV_MATCH_ANY) {
1697 /* set the copy flag */
1698 retval |= DM_RET_COPY;
1699
1700
1701 /*
1702 * If we've already decided on an action, go ahead
1703 * and return.
1704 */
1705 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1706 return(retval);
1707 }
1708
1709 /*
1710 * Not sure why someone would do this...
1711 */
1712 if (cur_pattern->flags == DEV_MATCH_NONE)
1713 continue;
1714
1715 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1716 && (cur_pattern->path_id != device->target->bus->path_id))
1717 continue;
1718
1719 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1720 && (cur_pattern->target_id != device->target->target_id))
1721 continue;
1722
1723 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1724 && (cur_pattern->target_lun != device->lun_id))
1725 continue;
1726
1727 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1728 && (cam_quirkmatch((caddr_t)&device->inq_data,
1729 (caddr_t)&cur_pattern->inq_pat,
1730 1, sizeof(cur_pattern->inq_pat),
1731 scsi_static_inquiry_match) == NULL))
1732 continue;
1733
1734 /*
1735 * If we get to this point, the user definitely wants
1736 * information on this device. So tell the caller to copy
1737 * the data out.
1738 */
1739 retval |= DM_RET_COPY;
1740
1741 /*
1742 * If the return action has been set to descend, then we
1743 * know that we've already seen a peripheral matching
1744 * expression, therefore we need to further descend the tree.
1745 * This won't change by continuing around the loop, so we
1746 * go ahead and return. If we haven't seen a peripheral
1747 * matching expression, we keep going around the loop until
1748 * we exhaust the matching expressions. We'll set the stop
1749 * flag once we fall out of the loop.
1750 */
1751 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1752 return(retval);
1753 }
1754
1755 /*
1756 * If the return action hasn't been set to descend yet, that means
1757 * we haven't seen any peripheral matching patterns. So tell the
1758 * caller to stop descending the tree -- the user doesn't want to
1759 * match against lower level tree elements.
1760 */
1761 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1762 retval |= DM_RET_STOP;
1763
1764 return(retval);
1765 }
1766
1767 /*
1768 * Match a single peripheral against any number of match patterns.
1769 */
1770 static dev_match_ret
1771 xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
1772 struct cam_periph *periph)
1773 {
1774 dev_match_ret retval;
1775 int i;
1776
1777 /*
1778 * If we aren't given something to match against, that's an error.
1779 */
1780 if (periph == NULL)
1781 return(DM_RET_ERROR);
1782
1783 /*
1784 * If there are no match entries, then this peripheral matches no
1785 * matter what.
1786 */
1787 if ((patterns == NULL) || (num_patterns == 0))
1788 return(DM_RET_STOP | DM_RET_COPY);
1789
1790 /*
1791 * There aren't any nodes below a peripheral node, so there's no
1792 * reason to descend the tree any further.
1793 */
1794 retval = DM_RET_STOP;
1795
1796 for (i = 0; i < num_patterns; i++) {
1797 struct periph_match_pattern *cur_pattern;
1798
1799 /*
1800 * If the pattern in question isn't for a peripheral, we
1801 * aren't interested.
1802 */
1803 if (patterns[i].type != DEV_MATCH_PERIPH)
1804 continue;
1805
1806 cur_pattern = &patterns[i].pattern.periph_pattern;
1807
1808 /*
1809 * If they want to match on anything, then we will do so.
1810 */
1811 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1812 /* set the copy flag */
1813 retval |= DM_RET_COPY;
1814
1815 /*
1816 * We've already set the return action to stop,
1817 * since there are no nodes below peripherals in
1818 * the tree.
1819 */
1820 return(retval);
1821 }
1822
1823 /*
1824 * Not sure why someone would do this...
1825 */
1826 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1827 continue;
1828
1829 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1830 && (cur_pattern->path_id != periph->path->bus->path_id))
1831 continue;
1832
1833 /*
1834 * For the target and lun id's, we have to make sure the
1835 * target and lun pointers aren't NULL. The xpt peripheral
1836 * has a wildcard target and device.
1837 */
1838 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1839 && ((periph->path->target == NULL)
1840 ||(cur_pattern->target_id != periph->path->target->target_id)))
1841 continue;
1842
1843 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1844 && ((periph->path->device == NULL)
1845 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1846 continue;
1847
1848 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1849 && (cur_pattern->unit_number != periph->unit_number))
1850 continue;
1851
1852 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1853 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1854 DEV_IDLEN) != 0))
1855 continue;
1856
1857 /*
1858 * If we get to this point, the user definitely wants
1859 * information on this peripheral. So tell the caller to
1860 * copy the data out.
1861 */
1862 retval |= DM_RET_COPY;
1863
1864 /*
1865 * The return action has already been set to stop, since
1866 * peripherals don't have any nodes below them in the EDT.
1867 */
1868 return(retval);
1869 }
1870
1871 /*
1872 * If we get to this point, the peripheral that was passed in
1873 * doesn't match any of the patterns.
1874 */
1875 return(retval);
1876 }
1877
/*
 * Per-bus callback for the EDT traversal used by the device-match code.
 * Matches this bus against the user's patterns, copies out a result if
 * requested, and (unless told to stop) descends to the bus's targets.
 * Returns 0 to abort the whole traversal, non-zero to keep going.
 */
static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		/* Append a bus entry to the user's match buffer. */
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	/* Resume from the saved target position if we have one. */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					(struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}
1973
/*
 * Per-target callback for the EDT device-match traversal.  Targets are
 * never copied out themselves; this just validates the saved device
 * generation (if resuming) and descends to the target's devices.
 * Returns 0 to abort the traversal, non-zero to keep going.
 */
static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
	     target->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	/* Resume from the saved device position if we have one. */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL))
		return(xptdevicetraverse(target,
					(struct cam_ed *)cdm->pos.cookie.device,
					 xptedtdevicefunc, arg));
	else
		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
}
2009
/*
 * Per-device callback for the EDT device-match traversal.  Matches the
 * device against the user's patterns, copies out a device result if
 * requested, and (unless told to stop) descends to the device's
 * peripheral list.  Returns 0 to abort the traversal, non-zero to
 * keep going.
 */
static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{

	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			/*
			 * Record the full bus/target/device position and
			 * the generation counts needed to validate it on
			 * the next call.
			 */
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		/* Append a device entry to the user's match buffer. */
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));

		/* Let the user know whether this device is unconfigured */
		if (device->flags & CAM_DEV_UNCONFIGURED)
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_UNCONFIGURED;
		else
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_NOFLAG;
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (device->target->bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (device->target == cdm->pos.cookie.target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (device == cdm->pos.cookie.device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     device->generation)){
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	/* Resume from the saved peripheral position if we have one. */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == device->target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptperiphtraverse(device,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptedtperiphfunc, arg));
	else
		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
}
2130
2131 static int
2132 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2133 {
2134 struct ccb_dev_match *cdm;
2135 dev_match_ret retval;
2136
2137 cdm = (struct ccb_dev_match *)arg;
2138
2139 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2140
2141 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2142 cdm->status = CAM_DEV_MATCH_ERROR;
2143 return(0);
2144 }
2145
2146 /*
2147 * If the copy flag is set, copy this peripheral out.
2148 */
2149 if (retval & DM_RET_COPY) {
2150 int spaceleft, j;
2151
2152 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2153 sizeof(struct dev_match_result));
2154
2155 /*
2156 * If we don't have enough space to put in another
2157 * match result, save our position and tell the
2158 * user there are more devices to check.
2159 */
2160 if (spaceleft < sizeof(struct dev_match_result)) {
2161 bzero(&cdm->pos, sizeof(cdm->pos));
2162 cdm->pos.position_type =
2163 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2164 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2165 CAM_DEV_POS_PERIPH;
2166
2167 cdm->pos.cookie.bus = periph->path->bus;
2168 cdm->pos.generations[CAM_BUS_GENERATION]=
2169 bus_generation;
2170 cdm->pos.cookie.target = periph->path->target;
2171 cdm->pos.generations[CAM_TARGET_GENERATION] =
2172 periph->path->bus->generation;
2173 cdm->pos.cookie.device = periph->path->device;
2174 cdm->pos.generations[CAM_DEV_GENERATION] =
2175 periph->path->target->generation;
2176 cdm->pos.cookie.periph = periph;
2177 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2178 periph->path->device->generation;
2179 cdm->status = CAM_DEV_MATCH_MORE;
2180 return(0);
2181 }
2182
2183 j = cdm->num_matches;
2184 cdm->num_matches++;
2185 cdm->matches[j].type = DEV_MATCH_PERIPH;
2186 cdm->matches[j].result.periph_result.path_id =
2187 periph->path->bus->path_id;
2188 cdm->matches[j].result.periph_result.target_id =
2189 periph->path->target->target_id;
2190 cdm->matches[j].result.periph_result.target_lun =
2191 periph->path->device->lun_id;
2192 cdm->matches[j].result.periph_result.unit_number =
2193 periph->unit_number;
2194 strncpy(cdm->matches[j].result.periph_result.periph_name,
2195 periph->periph_name, DEV_IDLEN);
2196 }
2197
2198 return(1);
2199 }
2200
2201 static int
2202 xptedtmatch(struct ccb_dev_match *cdm)
2203 {
2204 int ret;
2205
2206 cdm->num_matches = 0;
2207
2208 /*
2209 * Check the bus list generation. If it has changed, the user
2210 * needs to reset everything and start over.
2211 */
2212 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2213 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2214 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2215 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2216 return(0);
2217 }
2218
2219 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2220 && (cdm->pos.cookie.bus != NULL))
2221 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2222 xptedtbusfunc, cdm);
2223 else
2224 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2225
2226 /*
2227 * If we get back 0, that means that we had to stop before fully
2228 * traversing the EDT. It also means that one of the subroutines
2229 * has set the status field to the proper value. If we get back 1,
2230 * we've fully traversed the EDT and copied out any matching entries.
2231 */
2232 if (ret == 1)
2233 cdm->status = CAM_DEV_MATCH_LAST;
2234
2235 return(ret);
2236 }
2237
2238 static int
2239 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2240 {
2241 struct ccb_dev_match *cdm;
2242
2243 cdm = (struct ccb_dev_match *)arg;
2244
2245 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2246 && (cdm->pos.cookie.pdrv == pdrv)
2247 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2248 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2249 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2250 (*pdrv)->generation)) {
2251 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2252 return(0);
2253 }
2254
2255 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2256 && (cdm->pos.cookie.pdrv == pdrv)
2257 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2258 && (cdm->pos.cookie.periph != NULL))
2259 return(xptpdperiphtraverse(pdrv,
2260 (struct cam_periph *)cdm->pos.cookie.periph,
2261 xptplistperiphfunc, arg));
2262 else
2263 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2264 }
2265
2266 static int
2267 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2268 {
2269 struct ccb_dev_match *cdm;
2270 dev_match_ret retval;
2271
2272 cdm = (struct ccb_dev_match *)arg;
2273
2274 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2275
2276 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2277 cdm->status = CAM_DEV_MATCH_ERROR;
2278 return(0);
2279 }
2280
2281 /*
2282 * If the copy flag is set, copy this peripheral out.
2283 */
2284 if (retval & DM_RET_COPY) {
2285 int spaceleft, j;
2286
2287 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2288 sizeof(struct dev_match_result));
2289
2290 /*
2291 * If we don't have enough space to put in another
2292 * match result, save our position and tell the
2293 * user there are more devices to check.
2294 */
2295 if (spaceleft < sizeof(struct dev_match_result)) {
2296 struct periph_driver **pdrv;
2297
2298 pdrv = NULL;
2299 bzero(&cdm->pos, sizeof(cdm->pos));
2300 cdm->pos.position_type =
2301 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2302 CAM_DEV_POS_PERIPH;
2303
2304 /*
2305 * This may look a bit non-sensical, but it is
2306 * actually quite logical. There are very few
2307 * peripheral drivers, and bloating every peripheral
2308 * structure with a pointer back to its parent
2309 * peripheral driver linker set entry would cost
2310 * more in the long run than doing this quick lookup.
2311 */
2312 for (pdrv =
2313 (struct periph_driver **)periphdriver_set.ls_items;
2314 *pdrv != NULL; pdrv++) {
2315 if (strcmp((*pdrv)->driver_name,
2316 periph->periph_name) == 0)
2317 break;
2318 }
2319
2320 if (pdrv == NULL) {
2321 cdm->status = CAM_DEV_MATCH_ERROR;
2322 return(0);
2323 }
2324
2325 cdm->pos.cookie.pdrv = pdrv;
2326 /*
2327 * The periph generation slot does double duty, as
2328 * does the periph pointer slot. They are used for
2329 * both edt and pdrv lookups and positioning.
2330 */
2331 cdm->pos.cookie.periph = periph;
2332 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2333 (*pdrv)->generation;
2334 cdm->status = CAM_DEV_MATCH_MORE;
2335 return(0);
2336 }
2337
2338 j = cdm->num_matches;
2339 cdm->num_matches++;
2340 cdm->matches[j].type = DEV_MATCH_PERIPH;
2341 cdm->matches[j].result.periph_result.path_id =
2342 periph->path->bus->path_id;
2343
2344 /*
2345 * The transport layer peripheral doesn't have a target or
2346 * lun.
2347 */
2348 if (periph->path->target)
2349 cdm->matches[j].result.periph_result.target_id =
2350 periph->path->target->target_id;
2351 else
2352 cdm->matches[j].result.periph_result.target_id = -1;
2353
2354 if (periph->path->device)
2355 cdm->matches[j].result.periph_result.target_lun =
2356 periph->path->device->lun_id;
2357 else
2358 cdm->matches[j].result.periph_result.target_lun = -1;
2359
2360 cdm->matches[j].result.periph_result.unit_number =
2361 periph->unit_number;
2362 strncpy(cdm->matches[j].result.periph_result.periph_name,
2363 periph->periph_name, DEV_IDLEN);
2364 }
2365
2366 return(1);
2367 }
2368
2369 static int
2370 xptperiphlistmatch(struct ccb_dev_match *cdm)
2371 {
2372 int ret;
2373
2374 cdm->num_matches = 0;
2375
2376 /*
2377 * At this point in the edt traversal function, we check the bus
2378 * list generation to make sure that no busses have been added or
2379 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2380 * For the peripheral driver list traversal function, however, we
2381 * don't have to worry about new peripheral driver types coming or
2382 * going; they're in a linker set, and therefore can't change
2383 * without a recompile.
2384 */
2385
2386 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2387 && (cdm->pos.cookie.pdrv != NULL))
2388 ret = xptpdrvtraverse(
2389 (struct periph_driver **)cdm->pos.cookie.pdrv,
2390 xptplistpdrvfunc, cdm);
2391 else
2392 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2393
2394 /*
2395 * If we get back 0, that means that we had to stop before fully
2396 * traversing the peripheral driver tree. It also means that one of
2397 * the subroutines has set the status field to the proper value. If
2398 * we get back 1, we've fully traversed the EDT and copied out any
2399 * matching entries.
2400 */
2401 if (ret == 1)
2402 cdm->status = CAM_DEV_MATCH_LAST;
2403
2404 return(ret);
2405 }
2406
2407 static int
2408 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2409 {
2410 struct cam_eb *bus, *next_bus;
2411 int retval;
2412
2413 retval = 1;
2414
2415 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2416 bus != NULL;
2417 bus = next_bus) {
2418 next_bus = TAILQ_NEXT(bus, links);
2419
2420 retval = tr_func(bus, arg);
2421 if (retval == 0)
2422 return(retval);
2423 }
2424
2425 return(retval);
2426 }
2427
2428 static int
2429 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2430 xpt_targetfunc_t *tr_func, void *arg)
2431 {
2432 struct cam_et *target, *next_target;
2433 int retval;
2434
2435 retval = 1;
2436 for (target = (start_target ? start_target :
2437 TAILQ_FIRST(&bus->et_entries));
2438 target != NULL; target = next_target) {
2439
2440 next_target = TAILQ_NEXT(target, links);
2441
2442 retval = tr_func(target, arg);
2443
2444 if (retval == 0)
2445 return(retval);
2446 }
2447
2448 return(retval);
2449 }
2450
2451 static int
2452 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2453 xpt_devicefunc_t *tr_func, void *arg)
2454 {
2455 struct cam_ed *device, *next_device;
2456 int retval;
2457
2458 retval = 1;
2459 for (device = (start_device ? start_device :
2460 TAILQ_FIRST(&target->ed_entries));
2461 device != NULL;
2462 device = next_device) {
2463
2464 next_device = TAILQ_NEXT(device, links);
2465
2466 retval = tr_func(device, arg);
2467
2468 if (retval == 0)
2469 return(retval);
2470 }
2471
2472 return(retval);
2473 }
2474
2475 static int
2476 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2477 xpt_periphfunc_t *tr_func, void *arg)
2478 {
2479 struct cam_periph *periph, *next_periph;
2480 int retval;
2481
2482 retval = 1;
2483
2484 for (periph = (start_periph ? start_periph :
2485 SLIST_FIRST(&device->periphs));
2486 periph != NULL;
2487 periph = next_periph) {
2488
2489 next_periph = SLIST_NEXT(periph, periph_links);
2490
2491 retval = tr_func(periph, arg);
2492 if (retval == 0)
2493 return(retval);
2494 }
2495
2496 return(retval);
2497 }
2498
2499 static int
2500 xptpdrvtraverse(struct periph_driver **start_pdrv,
2501 xpt_pdrvfunc_t *tr_func, void *arg)
2502 {
2503 struct periph_driver **pdrv;
2504 int retval;
2505
2506 retval = 1;
2507
2508 /*
2509 * We don't traverse the peripheral driver list like we do the
2510 * other lists, because it is a linker set, and therefore cannot be
2511 * changed during runtime. If the peripheral driver list is ever
2512 * re-done to be something other than a linker set (i.e. it can
2513 * change while the system is running), the list traversal should
2514 * be modified to work like the other traversal functions.
2515 */
2516 for (pdrv = (start_pdrv ? start_pdrv :
2517 (struct periph_driver **)periphdriver_set.ls_items);
2518 *pdrv != NULL; pdrv++) {
2519 retval = tr_func(pdrv, arg);
2520
2521 if (retval == 0)
2522 return(retval);
2523 }
2524
2525 return(retval);
2526 }
2527
2528 static int
2529 xptpdperiphtraverse(struct periph_driver **pdrv,
2530 struct cam_periph *start_periph,
2531 xpt_periphfunc_t *tr_func, void *arg)
2532 {
2533 struct cam_periph *periph, *next_periph;
2534 int retval;
2535
2536 retval = 1;
2537
2538 for (periph = (start_periph ? start_periph :
2539 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2540 periph = next_periph) {
2541
2542 next_periph = TAILQ_NEXT(periph, unit_links);
2543
2544 retval = tr_func(periph, arg);
2545 if (retval == 0)
2546 return(retval);
2547 }
2548 return(retval);
2549 }
2550
2551 static int
2552 xptdefbusfunc(struct cam_eb *bus, void *arg)
2553 {
2554 struct xpt_traverse_config *tr_config;
2555
2556 tr_config = (struct xpt_traverse_config *)arg;
2557
2558 if (tr_config->depth == XPT_DEPTH_BUS) {
2559 xpt_busfunc_t *tr_func;
2560
2561 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2562
2563 return(tr_func(bus, tr_config->tr_arg));
2564 } else
2565 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2566 }
2567
2568 static int
2569 xptdeftargetfunc(struct cam_et *target, void *arg)
2570 {
2571 struct xpt_traverse_config *tr_config;
2572
2573 tr_config = (struct xpt_traverse_config *)arg;
2574
2575 if (tr_config->depth == XPT_DEPTH_TARGET) {
2576 xpt_targetfunc_t *tr_func;
2577
2578 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2579
2580 return(tr_func(target, tr_config->tr_arg));
2581 } else
2582 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2583 }
2584
2585 static int
2586 xptdefdevicefunc(struct cam_ed *device, void *arg)
2587 {
2588 struct xpt_traverse_config *tr_config;
2589
2590 tr_config = (struct xpt_traverse_config *)arg;
2591
2592 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2593 xpt_devicefunc_t *tr_func;
2594
2595 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2596
2597 return(tr_func(device, tr_config->tr_arg));
2598 } else
2599 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2600 }
2601
2602 static int
2603 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2604 {
2605 struct xpt_traverse_config *tr_config;
2606 xpt_periphfunc_t *tr_func;
2607
2608 tr_config = (struct xpt_traverse_config *)arg;
2609
2610 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2611
2612 /*
2613 * Unlike the other default functions, we don't check for depth
2614 * here. The peripheral driver level is the last level in the EDT,
2615 * so if we're here, we should execute the function in question.
2616 */
2617 return(tr_func(periph, tr_config->tr_arg));
2618 }
2619
2620 /*
2621 * Execute the given function for every bus in the EDT.
2622 */
2623 static int
2624 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2625 {
2626 struct xpt_traverse_config tr_config;
2627
2628 tr_config.depth = XPT_DEPTH_BUS;
2629 tr_config.tr_func = tr_func;
2630 tr_config.tr_arg = arg;
2631
2632 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2633 }
2634
2635 #ifdef notusedyet
2636 /*
2637 * Execute the given function for every target in the EDT.
2638 */
2639 static int
2640 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2641 {
2642 struct xpt_traverse_config tr_config;
2643
2644 tr_config.depth = XPT_DEPTH_TARGET;
2645 tr_config.tr_func = tr_func;
2646 tr_config.tr_arg = arg;
2647
2648 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2649 }
2650 #endif /* notusedyet */
2651
2652 /*
2653 * Execute the given function for every device in the EDT.
2654 */
2655 static int
2656 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2657 {
2658 struct xpt_traverse_config tr_config;
2659
2660 tr_config.depth = XPT_DEPTH_DEVICE;
2661 tr_config.tr_func = tr_func;
2662 tr_config.tr_arg = arg;
2663
2664 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2665 }
2666
2667 #ifdef notusedyet
2668 /*
2669 * Execute the given function for every peripheral in the EDT.
2670 */
2671 static int
2672 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2673 {
2674 struct xpt_traverse_config tr_config;
2675
2676 tr_config.depth = XPT_DEPTH_PERIPH;
2677 tr_config.tr_func = tr_func;
2678 tr_config.tr_arg = arg;
2679
2680 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2681 }
2682 #endif /* notusedyet */
2683
2684 static int
2685 xptsetasyncfunc(struct cam_ed *device, void *arg)
2686 {
2687 struct cam_path path;
2688 struct ccb_getdev cgd;
2689 struct async_node *cur_entry;
2690
2691 cur_entry = (struct async_node *)arg;
2692
2693 /*
2694 * Don't report unconfigured devices (Wildcard devs,
2695 * devices only for target mode, device instances
2696 * that have been invalidated but are waiting for
2697 * their last reference count to be released).
2698 */
2699 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2700 return (1);
2701
2702 xpt_compile_path(&path,
2703 NULL,
2704 device->target->bus->path_id,
2705 device->target->target_id,
2706 device->lun_id);
2707 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2708 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2709 xpt_action((union ccb *)&cgd);
2710 cur_entry->callback(cur_entry->callback_arg,
2711 AC_FOUND_DEVICE,
2712 &path, &cgd);
2713 xpt_release_path(&path);
2714
2715 return(1);
2716 }
2717
2718 static int
2719 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2720 {
2721 struct cam_path path;
2722 struct ccb_pathinq cpi;
2723 struct async_node *cur_entry;
2724
2725 cur_entry = (struct async_node *)arg;
2726
2727 xpt_compile_path(&path, /*periph*/NULL,
2728 bus->sim->path_id,
2729 CAM_TARGET_WILDCARD,
2730 CAM_LUN_WILDCARD);
2731 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2732 cpi.ccb_h.func_code = XPT_PATH_INQ;
2733 xpt_action((union ccb *)&cpi);
2734 cur_entry->callback(cur_entry->callback_arg,
2735 AC_PATH_REGISTERED,
2736 &path, &cpi);
2737 xpt_release_path(&path);
2738
2739 return(1);
2740 }
2741
/*
 * xpt_action: the central dispatch point for all CCB requests submitted
 * to the transport layer.  Marks the CCB in-progress, raises to
 * splsoftcam() for the duration, and dispatches on the CCB's function
 * code.  Immediate requests are completed in-line; queued I/O is inserted
 * into the device's CCB queue; SIM-directed requests are handed to the
 * bus's SIM action routine.
 */
void
xpt_action(union ccb *start_ccb)
{
	int iopl;	/* saved spl, restored at the bottom of the switch */

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));

	start_ccb->ccb_h.status = CAM_REQ_INPROG;

	iopl = splsoftcam();
	switch (start_ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
#ifdef CAMDEBUG
		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
		struct cam_path *path;

		path = start_ccb->ccb_h.path;
#endif

		/*
		 * For the sake of compatibility with SCSI-1
		 * devices that may not understand the identify
		 * message, we include lun information in the
		 * second byte of all commands.  SCSI-1 specifies
		 * that luns are a 3 bit value and reserves only 3
		 * bits for lun information in the CDB.  Later
		 * revisions of the SCSI spec allow for more than 8
		 * luns, but have deprecated lun information in the
		 * CDB.  So, if the lun won't fit, we must omit.
		 *
		 * Also be aware that during initial probing for devices,
		 * the inquiry information is unknown but initialized to 0.
		 * This means that this code will be exercised while probing
		 * devices with an ANSI revision greater than 2.
		 */
		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
		 && start_ccb->ccb_h.target_lun < 8
		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {

			start_ccb->csio.cdb_io.cdb_bytes[1] |=
			    start_ccb->ccb_h.target_lun << 5;
		}
		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
		start_ccb->csio.sense_resid = 0;
		start_ccb->csio.resid = 0;
		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
			  	       &path->device->inq_data),
			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
					  cdb_str, sizeof(cdb_str))));
		/* FALLTHROUGH */
	}
	case XPT_TARGET_IO:
	case XPT_CONT_TARGET_IO:
	case XPT_RESET_DEV:
	case XPT_ENG_EXEC:
	{
		struct cam_path *path;
		int s;
		int runq;

		/*
		 * Queue the CCB on its device and kick the device send
		 * queue unless the device queue is currently frozen.
		 */
		path = start_ccb->ccb_h.path;
		s = splsoftcam();

		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
		if (path->device->qfrozen_cnt == 0)
			runq = xpt_schedule_dev_sendq(path->bus, path->device);
		else
			runq = 0;
		splx(s);
		if (runq != 0)
			xpt_run_dev_sendq(path->bus);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		xpt_set_transfer_settings(&start_ccb->cts,
					  start_ccb->ccb_h.path->device,
					  /*async_update*/FALSE);
		break;
	}
	case XPT_CALC_GEOMETRY:
		/* Filter out garbage */
		if (start_ccb->ccg.block_size == 0
		 || start_ccb->ccg.volume_size == 0) {
			start_ccb->ccg.cylinders = 0;
			start_ccb->ccg.heads = 0;
			start_ccb->ccg.secs_per_track = 0;
			start_ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
#ifdef PC98
		/*
		 * In a PC-98 system, geometry translation depends on
		 * the "real" device geometry obtained from mode page 4.
		 * SCSI geometry translation is performed in the
		 * initialization routine of the SCSI BIOS and the result
		 * stored in host memory.  If the translation is available
		 * in host memory, use it.  If not, rely on the default
		 * translation the device driver performs.
		 */
		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
			start_ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		/* FALLTHROUGH */
#endif
	/* These requests are all serviced by the bus's SIM directly. */
	case XPT_ABORT:
	case XPT_ACCEPT_TARGET_IO:
	case XPT_EN_LUN:
	case XPT_IMMED_NOTIFY:
	case XPT_NOTIFY_ACK:
	case XPT_GET_TRAN_SETTINGS:
	case XPT_RESET_BUS:
	{
		struct cam_sim *sim;

		sim = start_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, start_ccb);
		break;
	}
	case XPT_PATH_INQ:
	{
		/* Path inquiries are also answered by the SIM. */
		struct cam_sim *sim;

		sim = start_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, start_ccb);
		break;
	}
	case XPT_PATH_STATS:
		/* Report the last reset time recorded for this bus. */
		start_ccb->cpis.last_reset =
			start_ccb->ccb_h.path->bus->last_reset;
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_GDEV_TYPE:
	{
		/*
		 * Return a snapshot of the device's inquiry data, queue
		 * statistics, and serial number.  splcam() keeps the
		 * device state stable while we copy it out.
		 */
		struct cam_ed *dev;
		int s;

		dev = start_ccb->ccb_h.path->device;
		s = splcam();
		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		} else {
			struct ccb_getdev *cgd;
			struct cam_eb *bus;
			struct cam_et *tar;

			cgd = &start_ccb->cgd;
			bus = cgd->ccb_h.path->bus;
			tar = cgd->ccb_h.path->target;
			cgd->inq_data = dev->inq_data;
			cgd->pd_type = SID_TYPE(&dev->inq_data);
#ifndef GARBAGE_COLLECT
			cgd->dev_openings = dev->ccbq.dev_openings;
			cgd->dev_active = dev->ccbq.dev_active;
			cgd->devq_openings = dev->ccbq.devq_openings;
			cgd->devq_queued = dev->ccbq.queue.entries;
			cgd->held = dev->ccbq.held;
			cgd->maxtags = dev->quirk->maxtags;
			cgd->mintags = dev->quirk->mintags;
#endif
			cgd->ccb_h.status = CAM_REQ_CMP;
			cgd->serial_num_len = dev->serial_num_len;
			if ((dev->serial_num_len > 0)
			 && (dev->serial_num != NULL))
				bcopy(dev->serial_num, cgd->serial_num,
				      dev->serial_num_len);
		}
		splx(s);
		break;
	}
	case XPT_GDEV_STATS:
	{
		/*
		 * Return the device's queue statistics and the most
		 * recent reset time (whichever of the target or bus
		 * reset happened last).
		 */
		struct cam_ed *dev;
		int s;

		dev = start_ccb->ccb_h.path->device;
		s = splcam();
		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		} else {
			struct ccb_getdevstats *cgds;
			struct cam_eb *bus;
			struct cam_et *tar;

			cgds = &start_ccb->cgds;
			bus = cgds->ccb_h.path->bus;
			tar = cgds->ccb_h.path->target;
			cgds->dev_openings = dev->ccbq.dev_openings;
			cgds->dev_active = dev->ccbq.dev_active;
			cgds->devq_openings = dev->ccbq.devq_openings;
			cgds->devq_queued = dev->ccbq.queue.entries;
			cgds->held = dev->ccbq.held;
			cgds->last_reset = tar->last_reset;
			cgds->maxtags = dev->quirk->maxtags;
			cgds->mintags = dev->quirk->mintags;
			/* Report the later of the target/bus reset times. */
			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
				cgds->last_reset = bus->last_reset;
			cgds->ccb_h.status = CAM_REQ_CMP;
		}
		splx(s);
		break;
	}
	case XPT_GDEVLIST:
	{
		/*
		 * Return the cgdl->index'th peripheral attached to this
		 * device, iterating one entry per call.  The generation
		 * count detects list changes between calls.
		 */
		struct cam_periph	*nperiph;
		struct periph_list	*periph_head;
		struct ccb_getdevlist	*cgdl;
		int			i;
		int			s;
		struct cam_ed		*device;
		int			found;


		found = 0;

		/*
		 * Don't want anyone mucking with our data.
		 */
		s = splcam();
		device = start_ccb->ccb_h.path->device;
		periph_head = &device->periphs;
		cgdl = &start_ccb->cgdl;

		/*
		 * Check and see if the list has changed since the user
		 * last requested a list member.  If so, tell them that the
		 * list has changed, and therefore they need to start over
		 * from the beginning.
		 */
		if ((cgdl->index != 0) &&
		    (cgdl->generation != device->generation)) {
			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
			splx(s);
			break;
		}

		/*
		 * Traverse the list of peripherals and attempt to find
		 * the requested peripheral.
		 */
		for (nperiph = periph_head->slh_first, i = 0;
		     (nperiph != NULL) && (i <= cgdl->index);
		     nperiph = nperiph->periph_links.sle_next, i++) {
			if (i == cgdl->index) {
				strncpy(cgdl->periph_name,
					nperiph->periph_name,
					DEV_IDLEN);
				cgdl->unit_number = nperiph->unit_number;
				found = 1;
			}
		}
		if (found == 0) {
			cgdl->status = CAM_GDEVLIST_ERROR;
			splx(s);
			break;
		}

		/* nperiph == NULL means we returned the last list entry. */
		if (nperiph == NULL)
			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
		else
			cgdl->status = CAM_GDEVLIST_MORE_DEVS;

		cgdl->index++;
		cgdl->generation = device->generation;

		splx(s);
		cgdl->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_DEV_MATCH:
	{
		int s;
		dev_pos_type position_type;
		struct ccb_dev_match *cdm;
		int ret;

		cdm = &start_ccb->cdm;

		/*
		 * Prevent EDT changes while we traverse it.
		 */
		s = splcam();
		/*
		 * There are two ways of getting at information in the EDT.
		 * The first way is via the primary EDT tree.  It starts
		 * with a list of busses, then a list of targets on a bus,
		 * then devices/luns on a target, and then peripherals on a
		 * device/lun.  The "other" way is by the peripheral driver
		 * lists.  The peripheral driver lists are organized by
		 * peripheral driver.  (obviously)  So it makes sense to
		 * use the peripheral driver list if the user is looking
		 * for something like "da1", or all "da" devices.  If the
		 * user is looking for something on a particular bus/target
		 * or lun, it's generally better to go through the EDT tree.
		 */

		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
			position_type = cdm->pos.position_type;
		else {
			int i;

			position_type = CAM_DEV_POS_NONE;

			/* Any bus/device pattern forces an EDT traversal. */
			for (i = 0; i < cdm->num_patterns; i++) {
				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
					position_type = CAM_DEV_POS_EDT;
					break;
				}
			}

			if (cdm->num_patterns == 0)
				position_type = CAM_DEV_POS_EDT;
			else if (position_type == CAM_DEV_POS_NONE)
				position_type = CAM_DEV_POS_PDRV;
		}

		switch(position_type & CAM_DEV_POS_TYPEMASK) {
		case CAM_DEV_POS_EDT:
			ret = xptedtmatch(cdm);
			break;
		case CAM_DEV_POS_PDRV:
			ret = xptperiphlistmatch(cdm);
			break;
		default:
			cdm->status = CAM_DEV_MATCH_ERROR;
			break;
		}

		splx(s);

		if (cdm->status == CAM_DEV_MATCH_ERROR)
			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		else
			start_ccb->ccb_h.status = CAM_REQ_CMP;

		break;
	}
	case XPT_SASYNC_CB:
	{
		/*
		 * Register, update, or remove an async event callback on
		 * the device.  A newly-added callback is immediately
		 * caught up on existing devices/busses if it asked for
		 * AC_FOUND_DEVICE/AC_PATH_REGISTERED events.
		 */
		struct ccb_setasync *csa;
		struct async_node *cur_entry;
		struct async_list *async_head;
		u_int32_t added;
		int s;

		csa = &start_ccb->csa;
		added = csa->event_enable;
		async_head = &csa->ccb_h.path->device->asyncs;

		/*
		 * If there is already an entry for us, simply
		 * update it.
		 */
		s = splcam();
		cur_entry = SLIST_FIRST(async_head);
		while (cur_entry != NULL) {
			if ((cur_entry->callback_arg == csa->callback_arg)
			 && (cur_entry->callback == csa->callback))
				break;
			cur_entry = SLIST_NEXT(cur_entry, links);
		}

		if (cur_entry != NULL) {
		 	/*
			 * If the request has no flags set,
			 * remove the entry.
			 */
			added &= ~cur_entry->event_enable;
			if (csa->event_enable == 0) {
				SLIST_REMOVE(async_head, cur_entry,
					     async_node, links);
				csa->ccb_h.path->device->refcount--;
				free(cur_entry, M_DEVBUF);
			} else {
				cur_entry->event_enable = csa->event_enable;
			}
		} else {
			cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
					   M_NOWAIT);
			if (cur_entry == NULL) {
				splx(s);
				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
				break;
			}
			cur_entry->callback_arg = csa->callback_arg;
			cur_entry->callback = csa->callback;
			cur_entry->event_enable = csa->event_enable;
			SLIST_INSERT_HEAD(async_head, cur_entry, links);
			csa->ccb_h.path->device->refcount++;
		}

		if ((added & AC_FOUND_DEVICE) != 0) {
			/*
			 * Get this peripheral up to date with all
			 * the currently existing devices.
			 */
			xpt_for_all_devices(xptsetasyncfunc, cur_entry);
		}
		if ((added & AC_PATH_REGISTERED) != 0) {
			/*
			 * Get this peripheral up to date with all
			 * the currently existing busses.
			 */
			xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
		}
		splx(s);
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_REL_SIMQ:
	{
		/*
		 * Adjust device queue openings and/or schedule a deferred
		 * release of the device queue (after a timeout, command
		 * completion, or queue-empty condition).  If no deferral
		 * condition ends up applying, the queue is released now.
		 */
		struct ccb_relsim *crs;
		struct cam_ed *dev;
		int s;

		crs = &start_ccb->crs;
		dev = crs->ccb_h.path->device;
		if (dev == NULL) {

			crs->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		}

		s = splcam();

		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {

			/* Only resize if the device supports tagged queuing. */
			if ((dev->inq_data.flags & SID_CmdQue) != 0) {

				/* Don't ever go below one opening */
				if (crs->openings > 0) {
					xpt_dev_ccbq_resize(crs->ccb_h.path,
							    crs->openings);

					if (bootverbose) {
						xpt_print_path(crs->ccb_h.path);
						printf("tagged openings "
						       "now %d\n",
						       crs->openings);
					}
				}
			}
		}

		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {

			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {

				/*
				 * Just extend the old timeout and decrement
				 * the freeze count so that a single timeout
				 * is sufficient for releasing the queue.
				 */
				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
				untimeout(xpt_release_devq_timeout,
					  dev, dev->c_handle);
			} else {

				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			}

			dev->c_handle =
				timeout(xpt_release_devq_timeout,
					dev,
					(crs->release_timeout * hz) / 1000);

			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;

		}

		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {

			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
				/*
				 * Decrement the freeze count so that a single
				 * completion is still sufficient to unfreeze
				 * the queue.
				 */
				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
			} else {

				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			}
		}

		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {

			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			 || (dev->ccbq.dev_active == 0)) {

				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
			} else {

				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			}
		}
		splx(s);

		/* No deferral condition applies: release the queue now. */
		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {

			xpt_release_devq(crs->ccb_h.path->device,
					 /*run_queue*/TRUE);
		}
		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_SCAN_BUS:
		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
		break;
	case XPT_SCAN_LUN:
		xpt_scan_lun(start_ccb->ccb_h.path->periph,
			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
			     start_ccb);
		break;
	case XPT_DEBUG: {
#ifdef CAMDEBUG
		/*
		 * Point the debug path/flags at the requested target,
		 * tearing down any previously installed debug path.
		 */
		int s;

		s = splcam();
		cam_dflags = start_ccb->cdbg.flags;
		if (cam_dpath != NULL) {
			xpt_free_path(cam_dpath);
			cam_dpath = NULL;
		}

		if (cam_dflags != CAM_DEBUG_NONE) {
			if (xpt_create_path(&cam_dpath, xpt_periph,
					    start_ccb->ccb_h.path_id,
					    start_ccb->ccb_h.target_id,
					    start_ccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP) {
				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				cam_dflags = CAM_DEBUG_NONE;
			} else {
				start_ccb->ccb_h.status = CAM_REQ_CMP;
				xpt_print_path(cam_dpath);
				printf("debugging flags now %x\n", cam_dflags);
			}
		} else {
			cam_dpath = NULL;
			start_ccb->ccb_h.status = CAM_REQ_CMP;
		}
		splx(s);
#else /* !CAMDEBUG */
		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
#endif /* CAMDEBUG */
		break;
	}
	case XPT_NOOP:
		/* A NOOP may still carry a queue-freeze request. */
		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
			xpt_freeze_devq(start_ccb->ccb_h.path, 1);
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	default:
	case XPT_SDEV_TYPE:
	case XPT_TERM_IO:
	case XPT_ENG_INQ:
		/* XXX Implement */
		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		break;
	}
	splx(iopl);
}
3312
/*
 * Execute a CCB to completion without depending on interrupt delivery:
 * the SIM's poll entry point and the CAM software interrupt handlers
 * are invoked by hand at 1ms intervals, at splcam.  The XXX note below
 * suggests this is primarily used for crash dumps.  The CCB's timeout
 * field is interpreted as a count of 1ms polling ticks.
 */
void
xpt_polled_action(union ccb *start_ccb)
{
	int s;
	u_int32_t timeout;		/* remaining wait, in 1ms DELAY() ticks */
	struct cam_sim *sim;
	struct cam_devq *devq;
	struct cam_ed *dev;

	timeout = start_ccb->ccb_h.timeout;
	sim = start_ccb->ccb_h.path->bus->sim;
	devq = sim->devq;
	dev = start_ccb->ccb_h.path->device;

	s = splcam();

	/*
	 * Steal an opening so that no other queued requests
	 * can get it before us while we simulate interrupts.
	 */
	dev->ccbq.devq_openings--;
	dev->ccbq.dev_openings--;

	/* Poll until both a SIM and a device opening free up, or we time out. */
	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
	   && (--timeout > 0)) {
		DELAY(1000);
		(*(sim->sim_poll))(sim);
		swi_camnet();
		swi_cambio();
	}

	/* Return the stolen openings before dispatching. */
	dev->ccbq.devq_openings++;
	dev->ccbq.dev_openings++;

	if (timeout != 0) {
		xpt_action(start_ccb);
		/* Poll until the CCB leaves CAM_REQ_INPROG or time runs out. */
		while(--timeout > 0) {
			(*(sim->sim_poll))(sim);
			swi_camnet();
			swi_cambio();
			if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
			    != CAM_REQ_INPROG)
				break;
			DELAY(1000);
		}
		if (timeout == 0) {
			/*
			 * XXX Is it worth adding a sim_timeout entry
			 * point so we can attempt recovery?  If
			 * this is only used for dumps, I don't think
			 * it is.
			 */
			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		}
	} else {
		/* Never got an opening; report resource shortage. */
		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	}
	splx(s);
}
3372
3373 /*
3374 * Schedule a peripheral driver to receive a ccb when it's
3375 * target device has space for more transactions.
3376 */
3377 void
3378 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3379 {
3380 struct cam_ed *device;
3381 int s;
3382 int runq;
3383
3384 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3385 device = perph->path->device;
3386 s = splsoftcam();
3387 if (periph_is_queued(perph)) {
3388 /* Simply reorder based on new priority */
3389 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3390 (" change priority to %d\n", new_priority));
3391 if (new_priority < perph->pinfo.priority) {
3392 camq_change_priority(&device->drvq,
3393 perph->pinfo.index,
3394 new_priority);
3395 }
3396 runq = 0;
3397 } else {
3398 /* New entry on the queue */
3399 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3400 (" added periph to queue\n"));
3401 perph->pinfo.priority = new_priority;
3402 perph->pinfo.generation = ++device->drvq.generation;
3403 camq_insert(&device->drvq, &perph->pinfo);
3404 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3405 }
3406 splx(s);
3407 if (runq != 0) {
3408 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3409 (" calling xpt_run_devq\n"));
3410 xpt_run_dev_allocq(perph->path->bus);
3411 }
3412 }
3413
3414
3415 /*
3416 * Schedule a device to run on a given queue.
3417 * If the device was inserted as a new entry on the queue,
3418 * return 1 meaning the device queue should be run. If we
3419 * were already queued, implying someone else has already
3420 * started the queue, return 0 so the caller doesn't attempt
3421 * to run the queue. Must be run at either splsoftcam
3422 * (or splcam since that encompases splsoftcam).
3423 */
3424 static int
3425 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3426 u_int32_t new_priority)
3427 {
3428 int retval;
3429 u_int32_t old_priority;
3430
3431 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3432
3433 old_priority = pinfo->priority;
3434
3435 /*
3436 * Are we already queued?
3437 */
3438 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3439 /* Simply reorder based on new priority */
3440 if (new_priority < old_priority) {
3441 camq_change_priority(queue, pinfo->index,
3442 new_priority);
3443 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3444 ("changed priority to %d\n",
3445 new_priority));
3446 }
3447 retval = 0;
3448 } else {
3449 /* New entry on the queue */
3450 if (new_priority < old_priority)
3451 pinfo->priority = new_priority;
3452
3453 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3454 ("Inserting onto queue\n"));
3455 pinfo->generation = ++queue->generation;
3456 camq_insert(queue, pinfo);
3457 retval = 1;
3458 }
3459 return (retval);
3460 }
3461
/*
 * Drain the bus's allocation queue: for each runnable device, allocate
 * a CCB, pop the highest-priority waiting periph off the device's
 * driver queue, and hand it the CCB via its periph_start routine.
 * Stops when the queue empties, openings run out, or the queue is
 * frozen by someone else (qfrozen_cnt > 1 — we hold one count
 * ourselves to prevent recursive entry).
 */
static void
xpt_run_dev_allocq(struct cam_eb *bus)
{
	struct cam_devq *devq;
	int s;

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
	devq = bus->sim->devq;

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
			(" qfrozen_cnt == 0x%x, entries == %d, "
			 "openings == %d, active == %d\n",
			 devq->alloc_queue.qfrozen_cnt,
			 devq->alloc_queue.entries,
			 devq->alloc_openings,
			 devq->alloc_active));

	s = splsoftcam();
	/* Hold a freeze count so a recursive call cannot also drain. */
	devq->alloc_queue.qfrozen_cnt++;
	while ((devq->alloc_queue.entries > 0)
	    && (devq->alloc_openings > 0)
	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
		struct cam_ed_qinfo *qinfo;
		struct cam_ed *device;
		union ccb *work_ccb;
		struct cam_periph *drv;
		struct camq *drvq;

		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
							   CAMQ_HEAD);
		device = qinfo->device;

		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("running device %p\n", device));

		drvq = &device->drvq;

#ifdef CAMDEBUG
		if (drvq->entries <= 0) {
			panic("xpt_run_dev_allocq: "
			      "Device on queue without any work to do");
		}
#endif
		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
			devq->alloc_openings--;
			devq->alloc_active++;
			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
			/* Drop IPL before calling out to the periph driver. */
			splx(s);
			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
				      drv->pinfo.priority);
			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
					("calling periph start\n"));
			drv->periph_start(drv, work_ccb);
		} else {
			/*
			 * Malloc failure in alloc_ccb
			 */
			/*
			 * XXX add us to a list to be run from free_ccb
			 * if we don't have any ccbs active on this
			 * device queue otherwise we may never get run
			 * again.
			 */
			break;
		}

		/* Raise IPL for possible insertion and test at top of loop */
		s = splsoftcam();

		if (drvq->entries > 0) {
			/* We have more work.  Attempt to reschedule */
			xpt_schedule_dev_allocq(bus, device);
		}
	}
	devq->alloc_queue.qfrozen_cnt--;
	splx(s);
}
3539
/*
 * Drain the SIM's send queue: pop runnable devices, take the head CCB
 * from each device's ccb queue, and dispatch it to the SIM's action
 * routine.  Skips frozen devices, parks CAM_HIGH_POWER CCBs on the
 * highpowerq when no power slots are free, and honors CAM_DEV_QFREEZE
 * requests.  Holds one freeze count on the send queue for the duration
 * so a concurrent freeze (qfrozen_cnt > 1) stops the loop.
 */
static void
xpt_run_dev_sendq(struct cam_eb *bus)
{
	struct cam_devq *devq;
	int s;

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));

	devq = bus->sim->devq;

	s = splcam();
	devq->send_queue.qfrozen_cnt++;
	splx(s);
	s = splsoftcam();
	while ((devq->send_queue.entries > 0)
	    && (devq->send_openings > 0)) {
		struct cam_ed_qinfo *qinfo;
		struct cam_ed *device;
		union ccb *work_ccb;
		struct cam_sim *sim;
		int ospl;

		ospl = splcam();
		/* Someone else froze the queue while we were draining. */
		if (devq->send_queue.qfrozen_cnt > 1) {
			splx(ospl);
			break;
		}

		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
							   CAMQ_HEAD);
		device = qinfo->device;

		/*
		 * If the device has been "frozen", don't attempt
		 * to run it.
		 */
		if (device->qfrozen_cnt > 0) {
			splx(ospl);
			continue;
		}

		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("running device %p\n", device));

		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
		if (work_ccb == NULL) {
			printf("device on run queue with no ccbs???");
			splx(ospl);
			continue;
		}

		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {

			if (num_highpower <= 0) {
				/*
				 * We got a high power command, but we
				 * don't have any available slots.  Freeze
				 * the device queue until we have a slot
				 * available.
				 */
				device->qfrozen_cnt++;
				STAILQ_INSERT_TAIL(&highpowerq,
						   &work_ccb->ccb_h,
						   xpt_links.stqe);

				splx(ospl);
				continue;
			} else {
				/*
				 * Consume a high power slot while
				 * this ccb runs.
				 */
				num_highpower--;
			}
		}
		devq->active_dev = device;
		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);

		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
		splx(ospl);

		devq->send_openings--;
		devq->send_active++;

		/* Device still has queued work: keep it schedulable. */
		if (device->ccbq.queue.entries > 0)
			xpt_schedule_dev_sendq(bus, device);

		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
			/*
			 * The client wants to freeze the queue
			 * after this CCB is sent.
			 */
			ospl = splcam();
			device->qfrozen_cnt++;
			splx(ospl);
		}

		/* Drop IPL before calling out to the SIM. */
		splx(s);

		if ((device->inq_flags & SID_CmdQue) != 0)
			work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
		else
			/*
			 * Clear this in case of a retried CCB that failed
			 * due to a rejected tag.
			 */
			work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;

		/*
		 * Device queues can be shared among multiple sim instances
		 * that reside on different busses.  Use the SIM in the queue
		 * CCB's path, rather than the one in the bus that was passed
		 * into this function.
		 */
		sim = work_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, work_ccb);

		ospl = splcam();
		devq->active_dev = NULL;
		splx(ospl);
		/* Raise IPL for possible insertion and test at top of loop */
		s = splsoftcam();
	}
	splx(s);
	s = splcam();
	devq->send_queue.qfrozen_cnt--;
	splx(s);
}
3668
3669 /*
3670 * This function merges stuff from the slave ccb into the master ccb, while
3671 * keeping important fields in the master ccb constant.
3672 */
3673 void
3674 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3675 {
3676 /*
3677 * Pull fields that are valid for peripheral drivers to set
3678 * into the master CCB along with the CCB "payload".
3679 */
3680 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3681 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3682 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3683 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3684 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3685 sizeof(union ccb) - sizeof(struct ccb_hdr));
3686 }
3687
3688 void
3689 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3690 {
3691 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3692 ccb_h->pinfo.priority = priority;
3693 ccb_h->path = path;
3694 ccb_h->path_id = path->bus->path_id;
3695 if (path->target)
3696 ccb_h->target_id = path->target->target_id;
3697 else
3698 ccb_h->target_id = CAM_TARGET_WILDCARD;
3699 if (path->device) {
3700 ccb_h->target_lun = path->device->lun_id;
3701 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3702 } else {
3703 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3704 }
3705 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3706 ccb_h->flags = 0;
3707 }
3708
3709 /* Path manipulation functions */
3710 cam_status
3711 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3712 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3713 {
3714 struct cam_path *path;
3715 cam_status status;
3716
3717 path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
3718
3719 if (path == NULL) {
3720 status = CAM_RESRC_UNAVAIL;
3721 return(status);
3722 }
3723 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3724 if (status != CAM_REQ_CMP) {
3725 free(path, M_DEVBUF);
3726 path = NULL;
3727 }
3728 *new_path_ptr = path;
3729 return (status);
3730 }
3731
/*
 * Fill in a caller-provided path structure for the given nexus,
 * looking up (and creating, if necessary) the bus/target/device EDT
 * entries it references.  On success each entry holds a reference on
 * its parent; on failure any entries created or referenced here are
 * released and the caller's structure is left untouched.
 */
static cam_status
xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
{
	struct cam_eb *bus;
	struct cam_et *target;
	struct cam_ed *device;
	cam_status status;
	int s;

	status = CAM_REQ_CMP;	/* Completed without error */
	target = NULL;		/* Wildcarded */
	device = NULL;		/* Wildcarded */

	/*
	 * We will potentially modify the EDT, so block interrupts
	 * that may attempt to create cam paths.
	 */
	s = splcam();
	bus = xpt_find_bus(path_id);
	if (bus == NULL) {
		status = CAM_PATH_INVALID;
	} else {
		target = xpt_find_target(bus, target_id);
		if (target == NULL) {
			/* Create one */
			struct cam_et *new_target;

			new_target = xpt_alloc_target(bus, target_id);
			if (new_target == NULL) {
				status = CAM_RESRC_UNAVAIL;
			} else {
				target = new_target;
			}
		}
		if (target != NULL) {
			device = xpt_find_device(target, lun_id);
			if (device == NULL) {
				/* Create one */
				struct cam_ed *new_device;

				new_device = xpt_alloc_device(bus,
							      target,
							      lun_id);
				if (new_device == NULL) {
					status = CAM_RESRC_UNAVAIL;
				} else {
					device = new_device;
				}
			}
		}
	}
	splx(s);

	/*
	 * Only touch the user's data if we are successful.
	 */
	if (status == CAM_REQ_CMP) {
		new_path->periph = perph;
		new_path->bus = bus;
		new_path->target = target;
		new_path->device = device;
		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
	} else {
		/* Unwind any references/allocations taken above. */
		if (device != NULL)
			xpt_release_device(bus, target, device);
		if (target != NULL)
			xpt_release_target(bus, target);
		if (bus != NULL)
			xpt_release_bus(bus);
	}
	return (status);
}
3805
3806 static void
3807 xpt_release_path(struct cam_path *path)
3808 {
3809 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3810 if (path->device != NULL)
3811 xpt_release_device(path->bus, path->target, path->device);
3812 if (path->target != NULL)
3813 xpt_release_target(path->bus, path->target);
3814 }
3815
3816 void
3817 xpt_free_path(struct cam_path *path)
3818 {
3819 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3820 xpt_release_path(path);
3821 free(path, M_DEVBUF);
3822 }
3823
3824
3825 /*
3826 * Return -1 for failure, 0 for exact match, 1 for match with wildcards.
3827 */
3828 int
3829 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3830 {
3831 int retval = 0;
3832
3833 if (path1->bus != path2->bus) {
3834 if (path1->bus->path_id == CAM_BUS_WILDCARD
3835 || path2->bus->path_id == CAM_BUS_WILDCARD)
3836 retval = 1;
3837 else
3838 return (-1);
3839 }
3840 if (path1->target != path2->target) {
3841 if (path1->target->target_id == CAM_TARGET_WILDCARD
3842 || path2->target->target_id == CAM_TARGET_WILDCARD)
3843 retval = 1;
3844 else
3845 return (-1);
3846 }
3847 if (path1->device != path2->device) {
3848 if (path1->device->lun_id == CAM_LUN_WILDCARD
3849 || path2->device->lun_id == CAM_LUN_WILDCARD)
3850 retval = 1;
3851 else
3852 return (-1);
3853 }
3854 return (retval);
3855 }
3856
3857 void
3858 xpt_print_path(struct cam_path *path)
3859 {
3860 if (path == NULL)
3861 printf("(nopath): ");
3862 else {
3863 if (path->periph != NULL)
3864 printf("(%s%d:", path->periph->periph_name,
3865 path->periph->unit_number);
3866 else
3867 printf("(noperiph:");
3868
3869 if (path->bus != NULL)
3870 printf("%s%d:%d:", path->bus->sim->sim_name,
3871 path->bus->sim->unit_number,
3872 path->bus->sim->bus_id);
3873 else
3874 printf("nobus:");
3875
3876 if (path->target != NULL)
3877 printf("%d:", path->target->target_id);
3878 else
3879 printf("X:");
3880
3881 if (path->device != NULL)
3882 printf("%d): ", path->device->lun_id);
3883 else
3884 printf("X): ");
3885 }
3886 }
3887
3888 path_id_t
3889 xpt_path_path_id(struct cam_path *path)
3890 {
3891 return(path->bus->path_id);
3892 }
3893
3894 target_id_t
3895 xpt_path_target_id(struct cam_path *path)
3896 {
3897 if (path->target != NULL)
3898 return (path->target->target_id);
3899 else
3900 return (CAM_TARGET_WILDCARD);
3901 }
3902
3903 lun_id_t
3904 xpt_path_lun_id(struct cam_path *path)
3905 {
3906 if (path->device != NULL)
3907 return (path->device->lun_id);
3908 else
3909 return (CAM_LUN_WILDCARD);
3910 }
3911
3912 struct cam_sim *
3913 xpt_path_sim(struct cam_path *path)
3914 {
3915 return (path->bus->sim);
3916 }
3917
3918 struct cam_periph*
3919 xpt_path_periph(struct cam_path *path)
3920 {
3921 return (path->periph);
3922 }
3923
/*
 * Release a CAM control block for the caller.  Remit the cost of the structure
 * to the device referenced by the path.  If this device had no 'credits'
 * and peripheral drivers have registered async callbacks for this notification
 * call them now.
 */
void
xpt_release_ccb(union ccb *free_ccb)
{
	int s;
	struct cam_path *path;
	struct cam_ed *device;
	struct cam_eb *bus;

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
	path = free_ccb->ccb_h.path;
	device = path->device;
	bus = path->bus;
	s = splsoftcam();
	cam_ccbq_release_opening(&device->ccbq);
	/* Shrink the global CCB pool back toward xpt_max_ccbs. */
	if (xpt_ccb_count > xpt_max_ccbs) {
		xpt_free_ccb(free_ccb);
		xpt_ccb_count--;
	} else {
		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
	}
	/* Return the allocation credit taken in xpt_run_dev_allocq. */
	bus->sim->devq->alloc_openings++;
	bus->sim->devq->alloc_active--;
	/* XXX Turn this into an inline function - xpt_run_device?? */
	if ((device_is_alloc_queued(device) == 0)
	 && (device->drvq.entries > 0)) {
		/* The freed opening may make this device runnable again. */
		xpt_schedule_dev_allocq(bus, device);
	}
	splx(s);
	if (dev_allocq_is_runnable(bus->sim->devq))
		xpt_run_dev_allocq(bus);
}
3961
3962 /* Functions accessed by SIM drivers */
3963
/*
 * A sim structure, listing the SIM entry points and instance
 * identification info is passed to xpt_bus_register to hook the SIM
 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 * for this new bus and places it in the array of busses and assigns
 * it a path_id.  The path_id may be influenced by "hard wiring"
 * information specified by the user.  Once interrupt services are
 * availible, the bus will be probed.
 *
 * Returns CAM_SUCCESS, or CAM_RESRC_UNAVAIL if the bus structure
 * cannot be allocated.
 */
int32_t
xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
{
	static path_id_t buscount;	/* next dynamically assigned path id */
	struct cam_eb *new_bus;
	struct ccb_pathinq cpi;
	int s;

	sim->bus_id = bus;
	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
					  M_DEVBUF, M_NOWAIT);
	if (new_bus == NULL) {
		/* Couldn't satisfy request */
		return (CAM_RESRC_UNAVAIL);
	}

	bzero(new_bus, sizeof(*new_bus));

	/* The xpt's own "bus" keeps its preset path id. */
	if (strcmp(sim->sim_name, "xpt") != 0) {

		sim->path_id = xptpathid(sim->sim_name, sim->unit_number,
					 sim->bus_id, &buscount);
	}

	new_bus->path_id = sim->path_id;
	new_bus->sim = sim;
	TAILQ_INIT(&new_bus->et_entries);
	timevalclear(&new_bus->last_reset);
	new_bus->refcount = 1;	/* Held until a bus_deregister event */
	s = splcam();
	TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
	bus_generation++;
	splx(s);

	/* Notify interested parties */
	if (sim->path_id != CAM_XPT_PATH_ID) {
		struct cam_path path;

		/* Use a stack path; it only lives for this notification. */
		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
			         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
		cpi.ccb_h.func_code = XPT_PATH_INQ;
		xpt_action((union ccb *)&cpi);
		xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi);
		xpt_release_path(&path);
	}
	return (CAM_SUCCESS);
}
4021
4022 static int
4023 xptnextfreebus(path_id_t startbus)
4024 {
4025 struct cam_sim_config *sim_conf;
4026
4027 sim_conf = cam_sinit;
4028 while (sim_conf->sim_name != NULL) {
4029
4030 if (IS_SPECIFIED(sim_conf->pathid)
4031 && (startbus == sim_conf->pathid)) {
4032 ++startbus;
4033 /* Start the search over */
4034 sim_conf = cam_sinit;
4035 } else {
4036 sim_conf++;
4037 }
4038 }
4039 return (startbus);
4040 }
4041
/*
 * Choose a path id for a newly registering SIM instance.  If the
 * kernel configuration (cam_sinit) wires this sim name/unit (and,
 * optionally, controller bus) to a specific scbus, that path id is
 * used; otherwise the next free id at or after *nextpath is assigned
 * dynamically and *nextpath is advanced past it.
 */
static int
xptpathid(const char *sim_name, int sim_unit,
	  int sim_bus, path_id_t *nextpath)
{
	struct cam_sim_config *sim_conf;
	path_id_t pathid;

	/* CAM_XPT_PATH_ID doubles as "no wired entry found" below. */
	pathid = CAM_XPT_PATH_ID;
	for (sim_conf = cam_sinit; sim_conf->sim_name != NULL; sim_conf++) {

		if (!IS_SPECIFIED(sim_conf->pathid))
			continue;

		if (!strcmp(sim_name, sim_conf->sim_name)
		 && (sim_unit == sim_conf->sim_unit)) {

			if (IS_SPECIFIED(sim_conf->sim_bus)) {
				if (sim_bus == sim_conf->sim_bus) {
					pathid = sim_conf->pathid;
					break;
				}
			} else if (sim_bus == 0) {
				/* Unspecified matches bus 0 */
				pathid = sim_conf->pathid;
				break;
			} else {
				/* Multi-bus controller with no bus wired. */
				printf("Ambiguous scbus configuration for %s%d "
				       "bus %d, cannot wire down.  The kernel "
				       "config entry for scbus%d should "
				       "specify a controller bus.\n"
				       "Scbus will be assigned dynamically.\n",
				       sim_name, sim_unit, sim_bus,
				       sim_conf->pathid);
				break;
			}
		}
	}

	if (pathid == CAM_XPT_PATH_ID) {
		/* No wired id applied; hand out the next free one. */
		pathid = xptnextfreebus(*nextpath);
		*nextpath = pathid + 1;
	}
	return (pathid);
}
4086
4087 int32_t
4088 xpt_bus_deregister(path_id)
4089 u_int8_t path_id;
4090 {
4091 /* XXX */
4092 return (CAM_SUCCESS);
4093 }
4094
/*
 * Deliver an asynchronous event to every EDT device matched by PATH
 * (wildcard components fan out to all targets/luns on the bus).
 * Certain event codes also get transport-level handling here: bus and
 * BDR resets record reset timestamps and toggle tagged queuing,
 * AC_INQ_CHANGED triggers a rescan, AC_LOST_DEVICE marks the device
 * unconfigured, and AC_TRANSFER_NEG applies new transfer settings.
 * Registered per-device callbacks are then invoked, and finally the
 * xpt's own catch-all listeners unless PATH already covered them.
 */
void
xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
{
	struct cam_eb *bus;
	struct cam_et *target, *next_target;
	struct cam_ed *device, *next_device;
	int s;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));

	/*
	 * Most async events come from a CAM interrupt context.  In
	 * a few cases, the error recovery code at the peripheral layer,
	 * which may run from our SWI or a process context, may signal
	 * deferred events with a call to xpt_async. Ensure async
	 * notifications are serialized by blocking cam interrupts.
	 */
	s = splcam();

	bus = path->bus;

	if (async_code == AC_BUS_RESET) {
		int s;

		s = splclock();
		/* Update our notion of when the last reset occurred */
		microtime(&bus->last_reset);
		splx(s);
	}

	for (target = TAILQ_FIRST(&bus->et_entries);
	     target != NULL;
	     target = next_target) {

		/* Fetch the successor now; handlers may alter the EDT. */
		next_target = TAILQ_NEXT(target, links);

		if (path->target != target
		 && path->target->target_id != CAM_TARGET_WILDCARD)
			continue;

		if (async_code == AC_SENT_BDR) {
			int s;

			/* Update our notion of when the last reset occurred */
			s = splclock();
			microtime(&path->target->last_reset);
			splx(s);
		}

		for (device = TAILQ_FIRST(&target->ed_entries);
		     device != NULL;
		     device = next_device) {
			cam_status status;
			struct cam_path newpath;

			next_device = TAILQ_NEXT(device, links);

			if (path->device != device
			 && path->device->lun_id != CAM_LUN_WILDCARD)
				continue;

			/*
			 * We need our own path with wildcards expanded to
			 * handle certain types of events.
			 */
			if ((async_code == AC_SENT_BDR)
			 || (async_code == AC_BUS_RESET)
			 || (async_code == AC_INQ_CHANGED))
				status = xpt_compile_path(&newpath, NULL,
							  bus->path_id,
							  target->target_id,
							  device->lun_id);
			else
				status = CAM_REQ_CMP_ERR;

			if (status == CAM_REQ_CMP) {

				/*
				 * Allow transfer negotiation to occur in a
				 * tag free environment.
				 */
				if (async_code == AC_SENT_BDR
				 || async_code == AC_BUS_RESET)
					xpt_toggle_tags(&newpath);

				if (async_code == AC_INQ_CHANGED) {
					/*
					 * We've sent a start unit command, or
					 * something similar to a device that
					 * may have caused its inquiry data to
					 * change. So we re-scan the device to
					 * refresh the inquiry data for it.
					 */
					xpt_scan_lun(newpath.periph, &newpath,
						     CAM_EXPECT_INQ_CHANGE,
						     NULL);
				}
				xpt_release_path(&newpath);
			} else if (async_code == AC_LOST_DEVICE) {
				device->flags |= CAM_DEV_UNCONFIGURED;
			} else if (async_code == AC_TRANSFER_NEG) {
				struct ccb_trans_settings *settings;

				settings =
				    (struct ccb_trans_settings *)async_arg;
				xpt_set_transfer_settings(settings, device,
							  /*async_update*/TRUE);
			}

			/* Notify periphs registered on this device. */
			xpt_async_bcast(&device->asyncs,
					async_code,
					path,
					async_arg);
		}
	}

	/*
	 * If this wasn't a fully wildcarded async, tell all
	 * clients that want all async events.
	 */
	if (bus != xpt_periph->path->bus)
		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
				path, async_arg);
	splx(s);
}
4220
4221 static void
4222 xpt_async_bcast(struct async_list *async_head,
4223 u_int32_t async_code,
4224 struct cam_path *path, void *async_arg)
4225 {
4226 struct async_node *cur_entry;
4227
4228 cur_entry = SLIST_FIRST(async_head);
4229 while (cur_entry != NULL) {
4230 struct async_node *next_entry;
4231 /*
4232 * Grab the next list entry before we call the current
4233 * entry's callback. This is because the callback function
4234 * can delete its async callback entry.
4235 */
4236 next_entry = SLIST_NEXT(cur_entry, links);
4237 if ((cur_entry->event_enable & async_code) != 0)
4238 cur_entry->callback(cur_entry->callback_arg,
4239 async_code, path,
4240 async_arg);
4241 cur_entry = next_entry;
4242 }
4243 }
4244
/*
 * Freeze the queue of the device referenced by PATH, COUNT times, and
 * return the resulting freeze count.  Runs at splcam so the in-flight
 * CCB race described below is closed against interrupt completion.
 */
u_int32_t
xpt_freeze_devq(struct cam_path *path, u_int count)
{
	int s;
	struct ccb_hdr *ccbh;

	s = splcam();
	path->device->qfrozen_cnt += count;

	/*
	 * Mark the last CCB in the queue as needing
	 * to be requeued if the driver hasn't
	 * changed its state yet.  This fixes a race
	 * where a ccb is just about to be queued to
	 * a controller driver when its interrupt routine
	 * freezes the queue.  To completely close the
	 * hole, controller drivers must check to see
	 * if a ccb's status is still CAM_REQ_INPROG
	 * under spl protection just before they queue
	 * the CCB.  See ahc_action/ahc_freeze_devq for
	 * an example.
	 */
	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
	if (ccbh && ccbh->status == CAM_REQ_INPROG)
		ccbh->status = CAM_REQUEUE_REQ;
	splx(s);
	return (path->device->qfrozen_cnt);
}
4273
/*
 * Freeze the SIM's send queue COUNT times and return the resulting
 * freeze count.  Like xpt_freeze_devq, the most recently queued CCB of
 * the currently active device is flagged for requeue if the SIM has
 * not yet begun processing it.
 *
 * NOTE(review): unlike xpt_freeze_devq, no splcam protection is taken
 * here — this appears to rely on being called from interrupt context
 * or with interrupts already blocked; confirm at the call sites.
 */
u_int32_t
xpt_freeze_simq(struct cam_sim *sim, u_int count)
{
	sim->devq->send_queue.qfrozen_cnt += count;
	if (sim->devq->active_dev != NULL) {
		struct ccb_hdr *ccbh;

		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
				  ccb_hdr_tailq);
		/* Close the dispatch race: requeue if not yet started. */
		if (ccbh && ccbh->status == CAM_REQ_INPROG)
			ccbh->status = CAM_REQUEUE_REQ;
	}
	return (sim->devq->send_queue.qfrozen_cnt);
}
4288
4289 static void
4290 xpt_release_devq_timeout(void *arg)
4291 {
4292 struct cam_ed *device;
4293
4294 device = (struct cam_ed *)arg;
4295
4296 xpt_release_devq(device, /*run_queue*/TRUE);
4297 }
4298
/*
 * Drop one freeze count on DEV.  When the count reaches zero, cancel
 * any release-on-completion state and pending release timeout, then —
 * if RUN_QUEUE is set and the device has queued work — run the bus's
 * send queue.  splsoftcam is held across the optional queue run;
 * splcam protects the freeze-count manipulation itself.
 */
void
xpt_release_devq(struct cam_ed *dev, int run_queue)
{
	int rundevq;
	int s0, s1;

	rundevq = 0;
	s0 = splsoftcam();
	s1 = splcam();
	if (dev->qfrozen_cnt > 0) {

		dev->qfrozen_cnt--;
		if (dev->qfrozen_cnt == 0) {

			/*
			 * No longer need to wait for a successful
			 * command completion.
			 */
			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;

			/*
			 * Remove any timeouts that might be scheduled
			 * to release this queue.
			 */
			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
				untimeout(xpt_release_devq_timeout, dev,
					  dev->c_handle);
				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
			}

			/*
			 * Now that we are unfrozen schedule the
			 * device so any pending transactions are
			 * run.
			 */
			if ((dev->ccbq.queue.entries > 0)
			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
			 && (run_queue != 0)) {
				rundevq = 1;
			}
		}
	}
	splx(s1);
	if (rundevq != 0)
		xpt_run_dev_sendq(dev->target->bus);
	splx(s0);
}
4346
/*
 * Drop one freeze count on the SIM's send queue.  When the count
 * reaches zero, cancel any pending release timeout and — if RUN_QUEUE
 * is set — run the send queue for the SIM's bus.  The bus reference
 * obtained via xpt_find_bus is released before returning.
 */
void
xpt_release_simq(struct cam_sim *sim, int run_queue)
{
	int s;
	struct camq *sendq;

	sendq = &(sim->devq->send_queue);
	s = splcam();
	if (sendq->qfrozen_cnt > 0) {

		sendq->qfrozen_cnt--;
		if (sendq->qfrozen_cnt == 0) {
			struct cam_eb *bus;

			/*
			 * If there is a timeout scheduled to release this
			 * sim queue, remove it.  The queue frozen count is
			 * already at 0.
			 */
			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
				untimeout(xpt_release_simq_timeout, sim,
					  sim->c_handle);
				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
			}
			bus = xpt_find_bus(sim->path_id);
			/* Drop IPL before running the (possibly long) queue. */
			splx(s);

			if (run_queue) {
				/*
				 * Now that we are unfrozen run the send queue.
				 */
				xpt_run_dev_sendq(bus);
			}
			xpt_release_bus(bus);
		} else
			splx(s);
	} else
		splx(s);
}
4386
4387 static void
4388 xpt_release_simq_timeout(void *arg)
4389 {
4390 struct cam_sim *sim;
4391
4392 sim = (struct cam_sim *)arg;
4393 xpt_release_simq(sim, /* run_queue */ TRUE);
4394 }
4395
/*
 * Completion entry point called by SIMs (typically from interrupt
 * context).  Queued-class CCBs are placed on the per-class done queue
 * (BIO or NET) and the matching software interrupt is requested;
 * final processing happens in the SWI handler.  CCBs whose function
 * code is not XPT_FC_QUEUED require no deferred processing here.
 */
void
xpt_done(union ccb *done_ccb)
{
	int s;

	s = splcam();

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		/*
		 * Queue up the request for handling by our SWI handler
		 * any of the "non-immediate" type of ccbs.
		 */
		switch (done_ccb->ccb_h.path->periph->type) {
		case CAM_PERIPH_BIO:
			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
					  sim_links.tqe);
			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
			setsoftcambio();
			break;
		case CAM_PERIPH_NET:
			TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
					  sim_links.tqe);
			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
			setsoftcamnet();
			break;
		}
	}
	splx(s);
}
4426
4427 union ccb *
4428 xpt_alloc_ccb()
4429 {
4430 union ccb *new_ccb;
4431
4432 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
4433 return (new_ccb);
4434 }
4435
4436 void
4437 xpt_free_ccb(union ccb *free_ccb)
4438 {
4439 free(free_ccb, M_DEVBUF);
4440 }
4441
4442
4443
4444 /* Private XPT functions */
4445
4446 /*
4447 * Get a CAM control block for the caller. Charge the structure to the device
4448 * referenced by the path. If the this device has no 'credits' then the
4449 * device already has the maximum number of outstanding operations under way
4450 * and we return NULL. If we don't have sufficient resources to allocate more
4451 * ccbs, we also return NULL.
4452 */
static union ccb *
xpt_get_ccb(struct cam_ed *device)
{
	union ccb *new_ccb;
	int s;

	/* The free list is protected at splsoftcam. */
	s = splsoftcam();
	if ((new_ccb = (union ccb *)ccb_freeq.slh_first) == NULL) {
		/* Free list is empty; grow it by one (may fail: M_NOWAIT). */
		new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
                if (new_ccb == NULL) {
			splx(s);
			return (NULL);
		}
		callout_handle_init(&new_ccb->ccb_h.timeout_ch);
		/*
		 * Insert at the head so the SLIST_REMOVE_HEAD below
		 * pops exactly this CCB.
		 */
		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
				  xpt_links.sle);
		xpt_ccb_count++;
	}
	/* Charge the allocation against the device's opening count. */
	cam_ccbq_take_opening(&device->ccbq);
	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
	splx(s);
	return (new_ccb);
}
4476
/*
 * Drop a reference on a bus.  When the last reference goes away and
 * no targets remain attached, unlink the bus from the global list and
 * free it.  Refcounts are manipulated at splcam.
 */
static void
xpt_release_bus(struct cam_eb *bus)
{
	int s;

	s = splcam();
	if ((--bus->refcount == 0)
	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
		TAILQ_REMOVE(&xpt_busses, bus, links);
		/* Invalidate any cached bus-list iteration state. */
		bus_generation++;
		splx(s);
		free(bus, M_DEVBUF);
	} else
		splx(s);
}
4492
/*
 * Allocate a target structure for target_id on the given bus and link
 * it into the bus's target list, kept sorted by target id.  Takes a
 * reference on the parent bus.  Returns NULL on allocation failure.
 */
static struct cam_et *
xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;

	target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
	if (target != NULL) {
		struct cam_et *cur_target;

		target->bus = bus;
		target->target_id = target_id;
		target->refcount = 1;
		/*
		 * Hold a reference to our parent bus so it
		 * will not go away before we do.
		 */
		bus->refcount++;
		TAILQ_INIT(&target->ed_entries);
		timevalclear(&target->last_reset);

		/* Insertion sort into our bus's target list */
		cur_target = TAILQ_FIRST(&bus->et_entries);
		while (cur_target != NULL && cur_target->target_id < target_id)
			cur_target = TAILQ_NEXT(cur_target, links);

		if (cur_target != NULL) {
			TAILQ_INSERT_BEFORE(cur_target, target, links);
		} else {
			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
		}
		/* Invalidate cached iterations over this bus's targets. */
		bus->generation++;
	}
	return (target);
}
4527
/*
 * Drop a reference on a target.  When the last reference goes away and
 * no devices remain attached, unlink and free the target, then release
 * the bus reference taken in xpt_alloc_target().
 */
static void
xpt_release_target(struct cam_eb *bus, struct cam_et *target)
{
	int s;

	s = splcam();
	if ((--target->refcount == 0)
	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
		TAILQ_REMOVE(&bus->et_entries, target, links);
		bus->generation++;
		splx(s);
		free(target, M_DEVBUF);
		/* Return the parent-bus reference we held. */
		xpt_release_bus(bus);
	} else
		splx(s);
}
4544
4545 static struct cam_ed *
4546 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4547 {
4548 struct cam_ed *device;
4549 struct cam_devq *devq;
4550 cam_status status;
4551
4552 /* Make space for us in the device queue on our bus */
4553 devq = bus->sim->devq;
4554 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4555
4556 if (status != CAM_REQ_CMP) {
4557 device = NULL;
4558 } else {
4559 device = (struct cam_ed *)malloc(sizeof(*device),
4560 M_DEVBUF, M_NOWAIT);
4561 }
4562
4563 if (device != NULL) {
4564 struct cam_ed *cur_device;
4565
4566 bzero(device, sizeof(*device));
4567
4568 SLIST_INIT(&device->asyncs);
4569 SLIST_INIT(&device->periphs);
4570 callout_handle_init(&device->c_handle);
4571 device->refcount = 1;
4572 device->flags |= CAM_DEV_UNCONFIGURED;
4573 /*
4574 * Take the default quirk entry until we have inquiry
4575 * data and can determine a better quirk to use.
4576 */
4577 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
4578
4579 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4580 device->alloc_ccb_entry.device = device;
4581 cam_init_pinfo(&device->send_ccb_entry.pinfo);
4582 device->send_ccb_entry.device = device;
4583
4584 device->target = target;
4585 /*
4586 * Hold a reference to our parent target so it
4587 * will not go away before we do.
4588 */
4589 target->refcount++;
4590
4591 device->lun_id = lun_id;
4592
4593 /* Initialize our queues */
4594 if (camq_init(&device->drvq, 0) != 0) {
4595 free(device, M_DEVBUF);
4596 return (NULL);
4597 }
4598
4599 if (cam_ccbq_init(&device->ccbq,
4600 bus->sim->max_dev_openings) != 0) {
4601 camq_fini(&device->drvq);
4602 free(device, M_DEVBUF);
4603 return (NULL);
4604 }
4605 /*
4606 * XXX should be limited by number of CCBs this bus can
4607 * do.
4608 */
4609 xpt_max_ccbs += device->ccbq.devq_openings;
4610 /* Insertion sort into our target's device list */
4611 cur_device = TAILQ_FIRST(&target->ed_entries);
4612 while (cur_device != NULL && cur_device->lun_id < lun_id)
4613 cur_device = TAILQ_NEXT(cur_device, links);
4614 if (cur_device != NULL) {
4615 TAILQ_INSERT_BEFORE(cur_device, device, links);
4616 } else {
4617 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4618 }
4619 target->generation++;
4620 }
4621 return (device);
4622 }
4623
/*
 * Drop a reference on a device.  When the last reference goes away and
 * the device is marked unconfigured, unlink it from the target, return
 * its devq slot and global CCB allowance, and free it.  Panics if the
 * device is still on an allocation or send queue.
 */
static void
xpt_release_device(struct cam_eb *bus, struct cam_et *target,
		   struct cam_ed *device)
{
	int s;

	s = splcam();
	if ((--device->refcount == 0)
	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
		struct cam_devq *devq;

		/* Sanity: a queued device must never be torn down. */
		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
			panic("Removing device while still queued for ccbs");
		TAILQ_REMOVE(&target->ed_entries, device,links);
		target->generation++;
		/* Undo the xpt_max_ccbs credit taken at allocation. */
		xpt_max_ccbs -= device->ccbq.devq_openings;
		/* Release our slot in the devq */
		devq = bus->sim->devq;
		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
		splx(s);
		free(device, M_DEVBUF);
		/*
		 * NOTE(review): unlike xpt_release_target(), no
		 * xpt_release_target() call is made here to drop the
		 * parent reference -- confirm callers do this themselves.
		 */
	} else
		splx(s);
}
4649
/*
 * Change the number of concurrent openings for the device on 'path' to
 * 'newopenings'.  If the queue shrank, flag the device so the resize
 * completes once in-flight commands drain.  Returns the cam_ccbq_resize
 * status.
 */
static u_int32_t
xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
{
	int s;
	int diff;
	int result;
	struct cam_ed *dev;

	dev = path->device;
	s = splsoftcam();

	/* diff < 0 means we are shrinking the queue. */
	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
	result = cam_ccbq_resize(&dev->ccbq, newopenings);
	if (result == CAM_REQ_CMP && (diff < 0)) {
		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
	}
	/*
	 * Adjust the global limit.
	 * NOTE(review): xpt_max_ccbs is adjusted even when
	 * cam_ccbq_resize() failed -- confirm this is intentional.
	 */
	xpt_max_ccbs += diff;
	splx(s);
	return (result);
}
4671
4672 static struct cam_eb *
4673 xpt_find_bus(path_id_t path_id)
4674 {
4675 struct cam_eb *bus;
4676
4677 for (bus = TAILQ_FIRST(&xpt_busses);
4678 bus != NULL;
4679 bus = TAILQ_NEXT(bus, links)) {
4680 if (bus->path_id == path_id) {
4681 bus->refcount++;
4682 break;
4683 }
4684 }
4685 return (bus);
4686 }
4687
4688 static struct cam_et *
4689 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
4690 {
4691 struct cam_et *target;
4692
4693 for (target = TAILQ_FIRST(&bus->et_entries);
4694 target != NULL;
4695 target = TAILQ_NEXT(target, links)) {
4696 if (target->target_id == target_id) {
4697 target->refcount++;
4698 break;
4699 }
4700 }
4701 return (target);
4702 }
4703
4704 static struct cam_ed *
4705 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4706 {
4707 struct cam_ed *device;
4708
4709 for (device = TAILQ_FIRST(&target->ed_entries);
4710 device != NULL;
4711 device = TAILQ_NEXT(device, links)) {
4712 if (device->lun_id == lun_id) {
4713 device->refcount++;
4714 break;
4715 }
4716 }
4717 return (device);
4718 }
4719
/* State shared by all in-flight lun scans of a single bus scan. */
typedef struct {
	union ccb *request_ccb;		/* original XPT_SCAN_BUS request */
	struct ccb_pathinq *cpi;	/* cached path inquiry for the bus */
	int pending_count;		/* targets still being scanned */
} xpt_scan_bus_info;
4725
4726 /*
4727 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
4728 * As the scan progresses, xpt_scan_bus is used as the
4729 * callback on completion function.
4730 */
static void
xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
{
	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_bus\n"));
	switch (request_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
	{
		/*
		 * Initial entry: kick off an XPT_SCAN_LUN for lun 0 of
		 * every target on the bus (except the initiator itself),
		 * with ourselves as the completion callback.
		 */
		xpt_scan_bus_info *scan_info;
		union ccb *work_ccb;
		struct cam_path *path;
		u_int i;
		u_int max_target;
		u_int initiator_id;

		/* Find out the characteristics of the bus */
		work_ccb = xpt_alloc_ccb();
		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}

		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
			/*
			 * Can't scan the bus on an adapter that
			 * cannot perform the initiator role.
			 */
			request_ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}

		/* Save some state for use while we probe for devices */
		scan_info = (xpt_scan_bus_info *)
		    malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
		scan_info->request_ccb = request_ccb;
		/* work_ccb lives on as the cached cpi; freed at scan end. */
		scan_info->cpi = &work_ccb->cpi;

		/* Cache on our stack so we can work asynchronously */
		max_target = scan_info->cpi->max_target;
		initiator_id = scan_info->cpi->initiator_id;

		/*
		 * Don't count the initiator if the
		 * initiator is addressable.
		 */
		scan_info->pending_count = max_target + 1;
		if (initiator_id <= max_target)
			scan_info->pending_count--;

		for (i = 0; i <= max_target; i++) {
			cam_status status;
			if (i == initiator_id)
				continue;

			status = xpt_create_path(&path, xpt_periph,
						 request_ccb->ccb_h.path_id,
						 i, 0);
			if (status != CAM_REQ_CMP) {
				printf("xpt_scan_bus: xpt_create_path failed"
				       " with status %#x, bus scan halted\n",
				       status);
				break;
			}
			work_ccb = xpt_alloc_ccb();
			xpt_setup_ccb(&work_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
			work_ccb->crcn.flags = request_ccb->crcn.flags;
#if 0
			printf("xpt_scan_bus: probing %d:%d:%d\n",
				request_ccb->ccb_h.path_id, i, 0);
#endif
			xpt_action(work_ccb);
		}
		break;
	}
	case XPT_SCAN_LUN:
	{
		/*
		 * A per-lun scan we started above has finished.  Decide
		 * whether to walk to the next lun on this target, or
		 * retire this target and, when the last target finishes,
		 * complete the original bus-scan request.
		 */
		xpt_scan_bus_info *scan_info;
		path_id_t path_id;
		target_id_t target_id;
		lun_id_t lun_id;

		/* Reuse the same CCB to query if a device was really found */
		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;

		path_id = request_ccb->ccb_h.path_id;
		target_id = request_ccb->ccb_h.target_id;
		lun_id = request_ccb->ccb_h.target_lun;
		xpt_action(request_ccb);

#if 0
		printf("xpt_scan_bus: got back probe from %d:%d:%d\n",
			path_id, target_id, lun_id);
#endif

		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
			struct cam_ed *device;
			struct cam_et *target;
			int s, phl;

			/*
			 * If we already probed lun 0 successfully, or
			 * we have additional configured luns on this
			 * target that might have "gone away", go onto
			 * the next lun.
			 */
			target = request_ccb->ccb_h.path->target;
			/*
			 * We may touch devices that we don't
			 * hold references too, so ensure they
			 * don't disappear out from under us.
			 * The target above is referenced by the
			 * path in the request ccb.
			 */
			phl = 0;
			s = splcam();
			device = TAILQ_FIRST(&target->ed_entries);
			if (device != NULL) {
				phl = device->quirk->quirks & CAM_QUIRK_HILUNS;
				if (device->lun_id == 0)
					device = TAILQ_NEXT(device, links);
			}
			splx(s);
			if ((lun_id != 0) || (device != NULL)) {
				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
					lun_id++;
			}
		} else {
			struct cam_ed *device;

			device = request_ccb->ccb_h.path->device;

			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
				/* Try the next lun */
				if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
				    (device->quirk->quirks & CAM_QUIRK_HILUNS))
					lun_id++;
			}
		}

		xpt_free_path(request_ccb->ccb_h.path);

		/*
		 * Check Bounds: lun_id unchanged means we decided not to
		 * advance; either way past max_lun this target is done.
		 */
		if ((lun_id == request_ccb->ccb_h.target_lun)
		 || lun_id > scan_info->cpi->max_lun) {
			/* We're done */

			xpt_free_ccb(request_ccb);
			scan_info->pending_count--;
			if (scan_info->pending_count == 0) {
				/* Last target finished: complete the scan. */
				xpt_free_ccb((union ccb *)scan_info->cpi);
				request_ccb = scan_info->request_ccb;
				free(scan_info, M_TEMP);
				request_ccb->ccb_h.status = CAM_REQ_CMP;
				xpt_done(request_ccb);
			}
		} else {
			/* Try the next device */
			struct cam_path *path;
			cam_status status;

			path = request_ccb->ccb_h.path;
			status = xpt_create_path(&path, xpt_periph,
						 path_id, target_id, lun_id);
			if (status != CAM_REQ_CMP) {
				printf("xpt_scan_bus: xpt_create_path failed "
				       "with status %#x, halting LUN scan\n",
			 	       status);
				xpt_free_ccb(request_ccb);
				scan_info->pending_count--;
				if (scan_info->pending_count == 0) {
					xpt_free_ccb(
						(union ccb *)scan_info->cpi);
					request_ccb = scan_info->request_ccb;
					free(scan_info, M_TEMP);
					request_ccb->ccb_h.status = CAM_REQ_CMP;
					xpt_done(request_ccb);
					break;
				}
			}
			xpt_setup_ccb(&request_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
			request_ccb->crcn.flags =
				scan_info->request_ccb->crcn.flags;
#if 0
			xpt_print_path(path);
			printf("xpt_scan bus probing\n");
#endif
			xpt_action(request_ccb);
		}
		break;
	}
	default:
		break;
	}
}
4944
/* Steps of the device probe state machine, driven by probedone(). */
typedef enum {
	PROBE_TUR,			/* clear pending unit attentions */
	PROBE_INQUIRY,			/* standard INQUIRY */
	PROBE_MODE_SENSE,		/* fetch control mode page */
	PROBE_SERIAL_NUM,		/* VPD unit serial number page */
	PROBE_TUR_FOR_NEGOTIATION	/* TUR to trigger xfer negotiation */
} probe_action;

typedef enum {
	PROBE_INQUIRY_CKSUM	= 0x01,	/* digest includes inquiry data */
	PROBE_SERIAL_CKSUM	= 0x02,	/* digest includes serial number */
	PROBE_NO_ANNOUNCE	= 0x04	/* suppress found/lost async events */
} probe_flags;

/* Per-periph state for an in-progress probe. */
typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;	/* queued scan requests */
	probe_action	action;		/* current state-machine step */
	union ccb	saved_ccb;	/* saved by cam_periph_error() */
	probe_flags	flags;
	MD5_CTX		context;	/* running inquiry/serial digest */
	u_int8_t	digest[16];	/* digest of previous instance */
} probe_softc;
4967
/*
 * Scan a single lun.  If request_ccb is NULL, allocate a CCB and path
 * of our own (completion then frees them via xptscandone).  The actual
 * probing is done by a "probe" peripheral: if one already exists for
 * this path the request is queued onto it, otherwise a new one is
 * allocated.
 */
static void
xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
	     cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_path *new_path;
	struct cam_periph *old_periph;
	int s;

	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_lun\n"));

	/* Ask the SIM about this path first. */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
		/*
		 * Can't scan the bus on an adapter that
		 * cannot perform the initiator role.
		 */
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(request_ccb);
		}
		return;
	}

	if (request_ccb == NULL) {
		/*
		 * Internally generated scan: build our own request CCB
		 * and a private path; xptscandone() frees both.
		 */
		request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
		if (request_ccb == NULL) {
			xpt_print_path(path);
			printf("xpt_scan_lun: can't allocate CCB, can't "
			       "continue\n");
			return;
		}
		new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
		if (new_path == NULL) {
			xpt_print_path(path);
			printf("xpt_scan_lun: can't allocate path, can't "
			       "continue\n");
			free(request_ccb, M_TEMP);
			return;
		}
		status = xpt_compile_path(new_path, xpt_periph,
					  path->bus->path_id,
					  path->target->target_id,
					  path->device->lun_id);

		if (status != CAM_REQ_CMP) {
			xpt_print_path(path);
			printf("xpt_scan_lun: can't compile path, can't "
			       "continue\n");
			free(request_ccb, M_TEMP);
			free(new_path, M_TEMP);
			return;
		}
		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
		request_ccb->ccb_h.cbfcnp = xptscandone;
		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
		request_ccb->crcn.flags = flags;
	}

	/* splsoftcam guards the periph lookup/alloc race. */
	s = splsoftcam();
	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
		probe_softc *softc;

		/* A probe is already in flight; queue behind it. */
		softc = (probe_softc *)old_periph->softc;
		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
				  periph_links.tqe);
	} else {
		status = cam_periph_alloc(proberegister, NULL, probecleanup,
					  probestart, "probe",
					  CAM_PERIPH_BIO,
					  request_ccb->ccb_h.path, NULL, 0,
					  request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print_path(path);
			printf("xpt_scan_lun: cam_alloc_periph returned an "
			       "error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
	splx(s);
}
5064
5065 static void
5066 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5067 {
5068 xpt_release_path(done_ccb->ccb_h.path);
5069 free(done_ccb->ccb_h.path, M_TEMP);
5070 free(done_ccb, M_TEMP);
5071 }
5072
/*
 * cam_periph_alloc() registration callback for the "probe" peripheral.
 * arg is the initiating XPT_SCAN_LUN CCB.  Allocates the probe softc,
 * queues the request, waits out the bus settle delay, and schedules
 * the first probe step.  Returns CAM_REQ_CMP_ERR on any failure.
 */
static cam_status
proberegister(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (periph == NULL) {
		printf("proberegister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (request_ccb == NULL) {
		printf("proberegister: no probe CCB, "
		       "can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);

	if (softc == NULL) {
		printf("proberegister: Unable to probe new device. "
		       "Unable to allocate softc\n");				
		return(CAM_REQ_CMP_ERR);
	}
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
			  periph_links.tqe);
	softc->flags = 0;
	periph->softc = softc;
	/* Reference held until the last queued request completes. */
	cam_periph_acquire(periph);
	/*
	 * Ensure we've waited at least a bus settle
	 * delay before attempting to probe the device.
	 */
	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
				      SCSI_DELAY);
	probeschedule(periph);
	return(CAM_REQ_CMP);
}
5112
/*
 * Pick the first probe step for the request at the head of the softc's
 * queue (TUR for previously known lun-0 devices, otherwise INQUIRY,
 * possibly preceded by a default transfer-negotiation request) and
 * schedule the periph to run it.
 */
static void
probeschedule(struct cam_periph *periph)
{
	struct ccb_pathinq cpi;
	union ccb *ccb;
	probe_softc *softc;

	softc = (probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/*
	 * If a device has gone away and another device, or the same one,
	 * is back in the same place, it should have a unit attention
	 * condition pending.  It will not report the unit attention in
	 * response to an inquiry, which may leave invalid transfer
	 * negotiations in effect.  The TUR will reveal the unit attention
	 * condition.  Only send the TUR for lun 0, since some devices 
	 * will get confused by commands other than inquiry to non-existent
	 * luns.  If you think a device has gone away start your scan from
	 * lun 0.  This will insure that any bogus transfer settings are
	 * invalidated.
	 *
	 * If we haven't seen the device before and the controller supports
	 * some kind of transfer negotiation, negotiate with the first
	 * sent command if no bus reset was performed at startup.  This
	 * ensures that the device is not confused by transfer negotiation
	 * settings left over by loader or BIOS action.
	 */
	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
	 && (ccb->ccb_h.target_lun == 0)) {
		softc->action = PROBE_TUR;
	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
		proberequestdefaultnegotiation(periph);
		softc->action = PROBE_INQUIRY;
	} else {
		softc->action = PROBE_INQUIRY;
	}

	/* Caller may ask us not to announce an expected inquiry change. */
	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
}
5163
/*
 * Issue the SCSI command for the current probe step.  Called by the
 * XPT with a CCB to fill in; completions are routed to probedone(),
 * which advances softc->action and reschedules us.
 */
static void
probestart(struct cam_periph *periph, union ccb *start_ccb)
{
	/* Probe the device that our peripheral driver points to */
	struct ccb_scsiio *csio;
	probe_softc *softc;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));

	softc = (probe_softc *)periph->softc;
	csio = &start_ccb->csio;

	switch (softc->action) {
	case PROBE_TUR:
	case PROBE_TUR_FOR_NEGOTIATION:
	{
		scsi_test_unit_ready(csio,
				     /*retries*/4,
				     probedone,
				     MSG_SIMPLE_Q_TAG,
				     SSD_FULL_SIZE,
				     /*timeout*/60000);
		break;
	}
	case PROBE_INQUIRY:
	{
		struct scsi_inquiry_data *inq_buf;

		/* Inquiry lands directly in the device's inq_data. */
		inq_buf = &periph->path->device->inq_data;
		/*
		 * If the device is currently configured, we calculate an
		 * MD5 checksum of the inquiry data, and if the serial number
		 * length is greater than 0, add the serial number data
		 * into the checksum as well.  Once the inquiry and the
		 * serial number check finish, we attempt to figure out
		 * whether we still have the same device.
		 */
		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {

			MD5Init(&softc->context);
			MD5Update(&softc->context, (unsigned char *)inq_buf,
				  sizeof(struct scsi_inquiry_data));
			softc->flags |= PROBE_INQUIRY_CKSUM;
			if (periph->path->device->serial_num_len > 0) {
				MD5Update(&softc->context,
					  periph->path->device->serial_num,
					  periph->path->device->serial_num_len);
				softc->flags |= PROBE_SERIAL_CKSUM;
			}
			MD5Final(softc->digest, &softc->context);
		} 

		scsi_inquiry(csio,
			     /*retries*/4,
			     probedone,
			     MSG_SIMPLE_Q_TAG,
			     (u_int8_t *)inq_buf,
			     sizeof(*inq_buf),
			     /*evpd*/FALSE,
			     /*page_code*/0,
			     SSD_MIN_SIZE,
			     /*timeout*/60 * 1000);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		void  *mode_buf;
		int    mode_buf_len;

		/* Header + block descriptor + control page; freed in
		 * probedone(). */
		mode_buf_len = sizeof(struct scsi_mode_header_6)
			     + sizeof(struct scsi_mode_blk_desc)
			     + sizeof(struct scsi_control_page);
		mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
		if (mode_buf != NULL) {
	                scsi_mode_sense(csio,
					/*retries*/4,
					probedone,
					MSG_SIMPLE_Q_TAG,
					/*dbd*/FALSE,
					SMS_PAGE_CTRL_CURRENT,
					SMS_CONTROL_MODE_PAGE,
					mode_buf,
					mode_buf_len,
					SSD_FULL_SIZE,
					/*timeout*/60000);
			break;
		}
		xpt_print_path(periph->path);
		printf("Unable to mode sense control page - malloc failure\n");
		/* Skip ahead to the serial number step instead. */
		softc->action = PROBE_SERIAL_NUM;
		/* FALLTHROUGH */
	}
	case PROBE_SERIAL_NUM:
	{
		struct scsi_vpd_unit_serial_number *serial_buf;
		struct cam_ed* device;

		serial_buf = NULL;
		device = periph->path->device;
		device->serial_num = NULL;
		device->serial_num_len = 0;

		/* Quirked devices never get asked for a serial number. */
		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
			serial_buf = (struct scsi_vpd_unit_serial_number *)
				malloc(sizeof(*serial_buf), M_TEMP, M_NOWAIT);

		if (serial_buf != NULL) {
			bzero(serial_buf, sizeof(*serial_buf));
			scsi_inquiry(csio,
				     /*retries*/4,
				     probedone,
				     MSG_SIMPLE_Q_TAG,
				     (u_int8_t *)serial_buf,
				     sizeof(*serial_buf),
				     /*evpd*/TRUE,
				     SVPD_UNIT_SERIAL_NUMBER,
				     SSD_MIN_SIZE,
				     /*timeout*/60 * 1000);
			break;
		}
		/*
		 * We'll have to do without, let our probedone
		 * routine finish up for us.
		 */
		start_ccb->csio.data_ptr = NULL;
		probedone(periph, start_ccb);
		return;
	}
	}
	xpt_action(start_ccb);
}
5295
/*
 * Reset transfer negotiation to a known state: read the user (default)
 * transfer settings for this path, then write them back as the current
 * settings so the controller renegotiates from scratch.
 */
static void
proberequestdefaultnegotiation(struct cam_periph *periph)
{
	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_USER_SETTINGS;
	xpt_action((union ccb *)&cts);
	/* Re-issue the fetched settings as the current ones. */
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.flags &= ~CCB_TRANS_USER_SETTINGS;
	cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
}
5310
/*
 * Completion handler for every probe step issued by probestart().
 * Each case either advances softc->action and reschedules (returning
 * early), or breaks out to the common tail that completes the queued
 * scan request and tears down or re-arms the probe periph.
 */
static void
probedone(struct cam_periph *periph, union ccb *done_ccb)
{
	probe_softc *softc;
	struct cam_path *path;
	u_int32_t  priority;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));

	softc = (probe_softc *)periph->softc;
	path = done_ccb->ccb_h.path;
	priority = done_ccb->ccb_h.pinfo.priority;

	switch (softc->action) {
	case PROBE_TUR:
	{
		/* TUR result is ignored beyond error recovery; it exists
		 * only to collect a pending unit attention. */
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {

			if (cam_periph_error(done_ccb, 0,
					     SF_NO_PRINT, NULL) == ERESTART)
				return;
			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				/* Don't wedge the queue */
				xpt_release_devq(done_ccb->ccb_h.path->device,
						 /*run_queue*/TRUE);
		}
		softc->action = PROBE_INQUIRY;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_INQUIRY:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_inquiry_data *inq_buf;
			u_int8_t periph_qual;
			u_int8_t periph_dtype;

			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
			inq_buf = &path->device->inq_data;

			periph_qual = SID_QUAL(inq_buf);
			periph_dtype = SID_TYPE(inq_buf);
			if (periph_dtype != T_NODEVICE) {
				switch(periph_qual) {
				case SID_QUAL_LU_CONNECTED:
				{
					/* Real device: match quirks, then
					 * continue probing (mode sense only
					 * if it claims command queuing). */
					xpt_find_quirk(path->device);

					if ((inq_buf->flags & SID_CmdQue) != 0)
						softc->action =
						    PROBE_MODE_SENSE;
					else
						softc->action =
						    PROBE_SERIAL_NUM;

					path->device->flags &= 
					     ~CAM_DEV_UNCONFIGURED;

					xpt_release_ccb(done_ccb);
					xpt_schedule(periph, priority);
					return;
				}
				default:
					break;
				}
			}
		} else if (cam_periph_error(done_ccb, 0,
					    done_ccb->ccb_h.target_lun > 0
					    ? SF_RETRY_UA|SF_QUIET_IR
					    : SF_RETRY_UA,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path->device,
					 /*run_queue*/TRUE);
		}
		/*
		 * If we get to this point, we got an error status back
		 * from the inquiry and the error status doesn't require
		 * automatically retrying the command.  Therefore, the
		 * inquiry failed.  If we had inquiry information before
		 * for this device, but this latest inquiry command failed,
		 * the device has probably gone away.  If this device isn't
		 * already marked unconfigured, notify the peripheral
		 * drivers that this device is no more.
		 */
		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
			/* Send the async notification. */
			xpt_async(AC_LOST_DEVICE, path, NULL);

		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		struct ccb_scsiio *csio;
		struct scsi_mode_header_6 *mode_hdr;

		csio = &done_ccb->csio;
		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_control_page *page;
			u_int8_t *offset;

			/* Control page follows header + block descriptors. */
			offset = ((u_int8_t *)&mode_hdr[1])
			    + mode_hdr->blk_desc_len;
			page = (struct scsi_control_page *)offset;
			path->device->queue_flags = page->queue_flags;
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path->device,
					 /*run_queue*/TRUE);
		}
		xpt_release_ccb(done_ccb);
		/* Buffer was allocated in probestart(). */
		free(mode_hdr, M_TEMP);
		softc->action = PROBE_SERIAL_NUM;
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_SERIAL_NUM:
	{
		struct ccb_scsiio *csio;
		struct scsi_vpd_unit_serial_number *serial_buf;
		u_int32_t  priority;
		int changed;
		int have_serialnum;

		changed = 1;
		have_serialnum = 0;
		csio = &done_ccb->csio;
		priority = done_ccb->ccb_h.pinfo.priority;
		serial_buf =
		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;

		/* Clean up from previous instance of this device */
		if (path->device->serial_num != NULL) {
			free(path->device->serial_num, M_DEVBUF);
			path->device->serial_num = NULL;
			path->device->serial_num_len = 0;
		}

		if (serial_buf == NULL) {
			/*
			 * Don't process the command as it was never sent
			 */
		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
			&& (serial_buf->length > 0)) {

			have_serialnum = 1;
			path->device->serial_num =
				(u_int8_t *)malloc((serial_buf->length + 1),
						   M_DEVBUF, M_NOWAIT);
			if (path->device->serial_num != NULL) {
				bcopy(serial_buf->serial_num,
				      path->device->serial_num,
				      serial_buf->length);
				path->device->serial_num_len =
				    serial_buf->length;
				path->device->serial_num[serial_buf->length]
				    = '\0';
			}
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path->device,
					 /*run_queue*/TRUE);
		}
		
		/*
		 * Let's see if we have seen this device before.
		 * Compare the digest of the fresh inquiry/serial data
		 * against the one saved before the probe began.
		 */
		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
			MD5_CTX context;
			u_int8_t digest[16];

			MD5Init(&context);
			
			MD5Update(&context,
				  (unsigned char *)&path->device->inq_data,
				  sizeof(struct scsi_inquiry_data));

			if (have_serialnum)
				MD5Update(&context, serial_buf->serial_num,
					  serial_buf->length);

			MD5Final(digest, &context);
			if (bcmp(softc->digest, digest, 16) == 0)
				changed = 0;

			/*
			 * XXX Do we need to do a TUR in order to ensure
			 *     that the device really hasn't changed???
			 */
			if ((changed != 0)
			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
				xpt_async(AC_LOST_DEVICE, path, NULL);
		}
		if (serial_buf != NULL)
			free(serial_buf, M_TEMP);

		if (changed != 0) {
			/*
			 * Now that we have all the necessary
			 * information to safely perform transfer
			 * negotiations... Controllers don't perform
			 * any negotiation or tagged queuing until
			 * after the first XPT_SET_TRAN_SETTINGS ccb is
			 * received.  So, on a new device, just retreive
			 * the user settings, and set them as the current
			 * settings to set the device up.
			 */
			proberequestdefaultnegotiation(periph);
			xpt_release_ccb(done_ccb);

			/*
			 * Perform a TUR to allow the controller to
			 * perform any necessary transfer negotiation.
			 */
			softc->action = PROBE_TUR_FOR_NEGOTIATION;
			xpt_schedule(periph, priority);
			return;
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_TUR_FOR_NEGOTIATION:
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path->device,
					 /*run_queue*/TRUE);
		}

		path->device->flags &= ~CAM_DEV_UNCONFIGURED;

		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
			/* Inform the XPT that a new device has been found */
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);

			xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	/*
	 * Common tail: complete the scan request at the head of the
	 * queue; destroy the probe periph if no more requests are
	 * pending, otherwise start the next one.
	 */
	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
	done_ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(done_ccb);
	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
		cam_periph_invalidate(periph);
		cam_periph_release(periph);
	} else {
		probeschedule(periph);
	}
}
5575
/*
 * Peripheral driver "oninvalidate/cleanup" hook for the probe periph:
 * release the probe_softc allocated when the probe peripheral was
 * registered.  The softc was allocated from M_TEMP, so it must be
 * returned to the same malloc type here.
 */
static void
probecleanup(struct cam_periph *periph)
{
	free(periph->softc, M_TEMP);
}
5581
5582 static void
5583 xpt_find_quirk(struct cam_ed *device)
5584 {
5585 caddr_t match;
5586
5587 match = cam_quirkmatch((caddr_t)&device->inq_data,
5588 (caddr_t)xpt_quirk_table,
5589 sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
5590 sizeof(*xpt_quirk_table), scsi_inquiry_match);
5591
5592 if (match == NULL)
5593 panic("xpt_find_quirk: device didn't match wildcard entry!!");
5594
5595 device->quirk = (struct xpt_quirk_entry *)match;
5596 }
5597
/*
 * Apply a XPT_SET_TRAN_SETTINGS request to a device, sanity checking
 * the user-requested values against both controller (XPT_PATH_INQ) and
 * device (INQUIRY data) capabilities, and carefully sequencing any
 * transition between tagged and untagged queuing.
 *
 * cts          - the transfer settings CCB to apply; fields not marked
 *                valid are filled in from the current settings.
 * device       - target device; NULL fails the CCB with CAM_PATH_INVALID.
 * async_update - when TRUE, only the tag-queuing bookkeeping below is
 *                performed; sanity checks and the SIM action call are
 *                skipped (used by xpt_toggle_tags()).
 */
static void
xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
			  int async_update)
{
	struct cam_sim *sim;
	int qfrozen;

	sim = cts->ccb_h.path->bus->sim;
	if (async_update == FALSE) {
		struct scsi_inquiry_data *inq_data;
		struct ccb_pathinq cpi;
		struct ccb_trans_settings cur_cts;

		if (device == NULL) {
			cts->ccb_h.status = CAM_PATH_INVALID;
			xpt_done((union ccb *)cts);
			return;
		}

		/*
		 * Perform sanity checking against what the
		 * controller and device can do.
		 */
		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
		cpi.ccb_h.func_code = XPT_PATH_INQ;
		xpt_action((union ccb *)&cpi);
		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
		cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
		xpt_action((union ccb *)&cur_cts);
		inq_data = &device->inq_data;

		/* Fill in any gaps in what the user gave us */
		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
			cts->sync_period = cur_cts.sync_period;
		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
			cts->sync_offset = cur_cts.sync_offset;
		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
			cts->bus_width = cur_cts.bus_width;
		if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
			cts->flags &= ~CCB_TRANS_DISC_ENB;
			cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
		}
		if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
			cts->flags &= ~CCB_TRANS_TAG_ENB;
			cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
		}

		/*
		 * Force async transfers if either the device denies
		 * synchronous support or the controller can't do SDTR.
		 */
		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		  && (inq_data->flags & SID_Sync) == 0)
		 || (cpi.hba_inquiry & PI_SDTR_ABLE) == 0) {
			/* Force async */
			cts->sync_period = 0;
			cts->sync_offset = 0;
		}

		/*
		 * Clamp the requested bus width down to the widest width
		 * both the controller and device claim to support.  The
		 * cases deliberately fall through to narrower widths.
		 */
		switch (cts->bus_width) {
		case MSG_EXT_WDTR_BUS_32_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus32) != 0)
			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
				break;
			/* Fall Through to 16-bit */
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus16) != 0)
			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
				break;
			}
			/* Fall Through to 8-bit */
		default: /* New bus width?? */
		case MSG_EXT_WDTR_BUS_8_BIT:
			/* All targets can do this */
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			break;
		}

		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
			/*
			 * Can't tag queue without disconnection.
			 */
			cts->flags &= ~CCB_TRANS_TAG_ENB;
			cts->valid |= CCB_TRANS_TQ_VALID;
		}

		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
		 || (inq_data->flags & SID_CmdQue) == 0
		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
		 || (device->quirk->mintags == 0)) {
			/*
			 * Can't tag on hardware that doesn't support,
			 * doesn't have it enabled, or has broken tag support.
			 */
			cts->flags &= ~CCB_TRANS_TAG_ENB;
		}
	}

	qfrozen = FALSE;
	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0
	 && (async_update == FALSE)) {
		int device_tagenb;

		/*
		 * If we are transitioning from tags to no-tags or
		 * vice-versa, we need to carefully freeze and restart
		 * the queue so that we don't overlap tagged and non-tagged
		 * commands.  We also temporarily stop tags if there is
		 * a change in transfer negotiation settings to allow
		 * "tag-less" negotiation.
		 */
		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
		 || (device->inq_flags & SID_CmdQue) != 0)
			device_tagenb = TRUE;
		else
			device_tagenb = FALSE;

		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
		  && device_tagenb == FALSE)
		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
		  && device_tagenb == TRUE)) {

			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
				/*
				 * Delay change to use tags until after a
				 * few commands have gone to this device so
				 * the controller has time to perform transfer
				 * negotiations without tagged messages getting
				 * in the way.
				 */
				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
			} else {
				/*
				 * Turning tags off: freeze the device queue
				 * so tagged and untagged commands can't mix,
				 * then shrink the ccb queue back to untagged
				 * depth.  The freeze is undone below via a
				 * RELSIM_RELEASE_AFTER_QEMPTY release.
				 */
				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
				qfrozen = TRUE;
				device->inq_flags &= ~SID_CmdQue;
				xpt_dev_ccbq_resize(cts->ccb_h.path,
						    sim->max_dev_openings);
				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
				device->tag_delay_count = 0;
			}
		}
	}

	if (async_update == FALSE) {
		/*
		 * If we are currently performing tagged transactions to
		 * this device and want to change its negotiation parameters,
		 * go non-tagged for a bit to give the controller a chance to
		 * negotiate unhampered by tag messages.
		 */
		if ((device->inq_flags & SID_CmdQue) != 0
		 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
				   CCB_TRANS_SYNC_OFFSET_VALID|
				   CCB_TRANS_BUS_WIDTH_VALID)) != 0)
			xpt_toggle_tags(cts->ccb_h.path);

		(*(sim->sim_action))(sim, (union ccb *)cts);
	}

	if (qfrozen) {
		struct ccb_relsim crs;

		/*
		 * Release the queue frozen above once the device's
		 * queue drains of any in-flight tagged commands.
		 */
		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
			      /*priority*/1);
		crs.ccb_h.func_code = XPT_REL_SIMQ;
		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
		crs.openings
		    = crs.release_timeout 
		    = crs.qfrozen_cnt
		    = 0;
		xpt_action((union ccb *)&crs);
	}
}
5771
5772 static void
5773 xpt_toggle_tags(struct cam_path *path)
5774 {
5775 struct cam_ed *dev;
5776
5777 /*
5778 * Give controllers a chance to renegotiate
5779 * before starting tag operations. We
5780 * "toggle" tagged queuing off then on
5781 * which causes the tag enable command delay
5782 * counter to come into effect.
5783 */
5784 dev = path->device;
5785 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5786 || ((dev->inq_flags & SID_CmdQue) != 0
5787 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
5788 struct ccb_trans_settings cts;
5789
5790 xpt_setup_ccb(&cts.ccb_h, path, 1);
5791 cts.flags = 0;
5792 cts.valid = CCB_TRANS_TQ_VALID;
5793 xpt_set_transfer_settings(&cts, path->device,
5794 /*async_update*/TRUE);
5795 cts.flags = CCB_TRANS_TAG_ENB;
5796 xpt_set_transfer_settings(&cts, path->device,
5797 /*async_update*/TRUE);
5798 }
5799 }
5800
5801 static void
5802 xpt_start_tags(struct cam_path *path)
5803 {
5804 struct ccb_relsim crs;
5805 struct cam_ed *device;
5806 struct cam_sim *sim;
5807 int newopenings;
5808
5809 device = path->device;
5810 sim = path->bus->sim;
5811 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5812 xpt_freeze_devq(path, /*count*/1);
5813 device->inq_flags |= SID_CmdQue;
5814 newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
5815 xpt_dev_ccbq_resize(path, newopenings);
5816 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
5817 crs.ccb_h.func_code = XPT_REL_SIMQ;
5818 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5819 crs.openings
5820 = crs.release_timeout
5821 = crs.qfrozen_cnt
5822 = 0;
5823 xpt_action((union ccb *)&crs);
5824 }
5825
/*
 * Boot-time configuration bookkeeping: busses_to_config counts real
 * (non-XPT) busses still awaiting configuration; busses_to_reset counts
 * those that will receive an initial bus reset.  Both are set by
 * xptconfigbuscountfunc() and consumed by xpt_config()/xpt_finishconfig().
 */
static int busses_to_config;
static int busses_to_reset;
5828
5829 static int
5830 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
5831 {
5832 if (bus->path_id != CAM_XPT_PATH_ID) {
5833 struct cam_path path;
5834 struct ccb_pathinq cpi;
5835 int can_negotiate;
5836
5837 busses_to_config++;
5838 xpt_compile_path(&path, NULL, bus->path_id,
5839 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5840 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
5841 cpi.ccb_h.func_code = XPT_PATH_INQ;
5842 xpt_action((union ccb *)&cpi);
5843 can_negotiate = cpi.hba_inquiry;
5844 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
5845 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
5846 && can_negotiate)
5847 busses_to_reset++;
5848 xpt_release_path(&path);
5849 }
5850
5851 return(1);
5852 }
5853
/*
 * Second-pass bus enumeration callback: for each real bus, allocate a
 * CCB and path, query controller capabilities, and either issue an
 * initial bus reset (when the controller permits resets and can
 * negotiate transfers) or proceed as if a reset had completed.  In all
 * non-error cases the CCB and path are handed off to xpt_finishconfig(),
 * which owns freeing them and driving the subsequent bus scan.
 *
 * Returns 1 to continue the traversal, 0 to halt it on a path
 * creation failure.
 */
static int
xptconfigfunc(struct cam_eb *bus, void *arg)
{
	struct cam_path *path;
	union ccb *work_ccb;

	if (bus->path_id != CAM_XPT_PATH_ID) {
		cam_status status;
		int can_negotiate;

		work_ccb = xpt_alloc_ccb();
		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
					      CAM_TARGET_WILDCARD,
					      CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
			printf("xptconfigfunc: xpt_create_path failed with "
			       "status %#x for bus %d\n", status, bus->path_id);
			printf("xptconfigfunc: halting bus configuration\n");
			xpt_free_ccb(work_ccb);
			/*
			 * This bus will never be configured; drop it from
			 * the count so xpt_finishconfig() can still reach
			 * zero and release the boot hook.
			 */
			busses_to_config--;
			xpt_finishconfig(xpt_periph, NULL);
			return(0);
		}
		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			printf("xptconfigfunc: CPI failed on bus %d "
			       "with status %d\n", bus->path_id,
			       work_ccb->ccb_h.status);
			/* finishconfig frees the path and CCB for us. */
			xpt_finishconfig(xpt_periph, work_ccb);
			return(1);
		}

		can_negotiate = work_ccb->cpi.hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
		 && (can_negotiate != 0)) {
			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			work_ccb->ccb_h.cbfcnp = NULL;
			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
				  ("Resetting Bus\n"));
			xpt_action(work_ccb);
			xpt_finishconfig(xpt_periph, work_ccb);
		} else {
			/* Act as though we performed a successful BUS RESET */
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			xpt_finishconfig(xpt_periph, work_ccb);
		}
	}

	return(1);
}
5907
/*
 * Kick off initial bus/device discovery once interrupts are enabled
 * (invoked via the config_intrhook established at attach time).  First
 * counts configurable busses (and those that will be reset), optionally
 * announces the SCSI settle delay, then walks every bus with
 * xptconfigfunc() to begin configuration.  Debugging path setup is
 * compiled in only under CAMDEBUG.
 */
static void
xpt_config(void *arg)
{
	/* Now that interrupts are enabled, go find our devices */

#ifdef CAMDEBUG
	/* Setup debugging flags and path */
#ifdef CAM_DEBUG_FLAGS
	cam_dflags = CAM_DEBUG_FLAGS;
#else /* !CAM_DEBUG_FLAGS */
	cam_dflags = CAM_DEBUG_NONE;
#endif /* CAM_DEBUG_FLAGS */
#ifdef CAM_DEBUG_BUS
	if (cam_dflags != CAM_DEBUG_NONE) {
		if (xpt_create_path(&cam_dpath, xpt_periph,
				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
			printf("xpt_config: xpt_create_path() failed for debug"
			       " target %d:%d:%d, debugging disabled\n",
			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
			cam_dflags = CAM_DEBUG_NONE;
		}
	} else
		cam_dpath = NULL;
#else /* !CAM_DEBUG_BUS */
	cam_dpath = NULL;
#endif /* CAM_DEBUG_BUS */
#endif /* CAMDEBUG */

	/*
	 * Scan all installed busses.
	 */
	xpt_for_all_busses(xptconfigbuscountfunc, NULL);

	if (busses_to_config == 0) {
		/* Call manually because we don't have any busses */
		xpt_finishconfig(xpt_periph, NULL);
	} else {
		if (busses_to_reset > 0 && SCSI_DELAY >= 2000) {
			printf("Waiting %d seconds for SCSI "
			       "devices to settle\n", SCSI_DELAY/1000);
		}
		xpt_for_all_busses(xptconfigfunc, NULL);
	}
}
5953
5954 /*
5955 * If the given device only has one peripheral attached to it, and if that
5956 * peripheral is the passthrough driver, announce it. This insures that the
5957 * user sees some sort of announcement for every peripheral in their system.
5958 */
5959 static int
5960 xptpassannouncefunc(struct cam_ed *device, void *arg)
5961 {
5962 struct cam_periph *periph;
5963 int i;
5964
5965 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5966 periph = SLIST_NEXT(periph, periph_links), i++);
5967
5968 periph = SLIST_FIRST(&device->periphs);
5969 if ((i == 1)
5970 && (strncmp(periph->periph_name, "pass", 4) == 0))
5971 xpt_announce_periph(periph, NULL);
5972
5973 return(1);
5974 }
5975
/*
 * Completion handler for the boot-time configuration sequence.  Called
 * with the CCB from a bus reset or bus scan (or NULL when a bus had
 * nothing to do).  A successfully completed XPT_RESET_BUS is recycled
 * into an XPT_SCAN_BUS with this function as its callback; any other
 * completion retires the bus (frees its path, decrements
 * busses_to_config).  When the last bus finishes, all peripheral
 * drivers are initialized, lonely passthrough devices are announced,
 * and the config_intrhook is released so boot can continue.
 */
static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
	struct  periph_driver **p_drv;
	int	i;

	if (done_ccb != NULL) {
		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
			  ("xpt_finishconfig\n"));
		switch(done_ccb->ccb_h.func_code) {
		case XPT_RESET_BUS:
			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
				/*
				 * Reset done; reuse the same CCB to scan
				 * the bus, and come back here when the
				 * scan completes.
				 */
				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
				xpt_action(done_ccb);
				return;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_BUS:
		default:
			xpt_free_path(done_ccb->ccb_h.path);
			busses_to_config--;
			break;
		}
	}

	if (busses_to_config == 0) {
		/* Register all the peripheral drivers */
		/* XXX This will have to change when we have LKMs */
		p_drv = (struct periph_driver **)periphdriver_set.ls_items;
		for (i = 0; p_drv[i] != NULL; i++) {
			(*p_drv[i]->init)();
		}

		/*
		 * Check for devices with no "standard" peripheral driver
		 * attached.  For any devices like that, announce the
		 * passthrough driver so the user will see something.
		 */
		xpt_for_all_devices(xptpassannouncefunc, NULL);

		/* Release our hook so that the boot can continue. */
		config_intrhook_disestablish(xpt_config_hook);
	}
	if (done_ccb != NULL)
		xpt_free_ccb(done_ccb);
}
6023
6024 static void
6025 xptaction(struct cam_sim *sim, union ccb *work_ccb)
6026 {
6027 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
6028
6029 switch (work_ccb->ccb_h.func_code) {
6030 /* Common cases first */
6031 case XPT_PATH_INQ: /* Path routing inquiry */
6032 {
6033 struct ccb_pathinq *cpi;
6034
6035 cpi = &work_ccb->cpi;
6036 cpi->version_num = 1; /* XXX??? */
6037 cpi->hba_inquiry = 0;
6038 cpi->target_sprt = 0;
6039 cpi->hba_misc = 0;
6040 cpi->hba_eng_cnt = 0;
6041 cpi->max_target = 0;
6042 cpi->max_lun = 0;
6043 cpi->initiator_id = 0;
6044 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
6045 strncpy(cpi->hba_vid, "", HBA_IDLEN);
6046 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
6047 cpi->unit_number = sim->unit_number;
6048 cpi->bus_id = sim->bus_id;
6049 cpi->base_transfer_speed = 0;
6050 cpi->ccb_h.status = CAM_REQ_CMP;
6051 xpt_done(work_ccb);
6052 break;
6053 }
6054 default:
6055 work_ccb->ccb_h.status = CAM_REQ_INVALID;
6056 xpt_done(work_ccb);
6057 break;
6058 }
6059 }
6060
6061 /*
6062 * Should only be called by the machine interrupt dispatch routines,
6063 * so put these prototypes here instead of in the header.
6064 */
6065
/*
 * Software-interrupt entry point for the CAM "network" completion
 * queue; simply drains cam_netq via camisr().
 */
static void
swi_camnet(void)
{
	camisr(&cam_netq);
}
6071
/*
 * Software-interrupt entry point for the CAM block-I/O completion
 * queue; simply drains cam_bioq via camisr().
 */
static void
swi_cambio(void)
{
	camisr(&cam_bioq);
}
6077
/*
 * CAM software interrupt handler: drain a completion queue, performing
 * per-CCB completion bookkeeping (high-power accounting, device ccb
 * queue updates, devq/simq releases, tag-delay countdown) before
 * invoking each CCB's peripheral callback.  The queue itself is only
 * manipulated at splcam(); the interrupt level is dropped while each
 * CCB is processed and re-raised for the loop's next queue test.
 */
static void
camisr(cam_isrq_t *queue)
{
	int	s;
	struct	ccb_hdr *ccb_h;

	s = splcam();
	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
		int	runq;

		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
		splx(s);

		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
			  ("camisr"));

		runq = FALSE;

		if (ccb_h->flags & CAM_HIGH_POWER) {
			struct highpowerlist	*hphead;
			struct cam_ed		*device;
			union ccb		*send_ccb;

			hphead = &highpowerq;

			send_ccb = (union ccb *)STAILQ_FIRST(hphead);

			/*
			 * Increment the count since this command is done.
			 */
			num_highpower++;

			/*
			 * Any high powered commands queued up?
			 */
			if (send_ccb != NULL) {
				/*
				 * NOTE(review): 'device' is assigned but the
				 * release below re-reads the path directly.
				 */
				device = send_ccb->ccb_h.path->device;

				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);

				/* Releasing the devq restarts the waiter. */
				xpt_release_devq(send_ccb->ccb_h.path->device,
						 TRUE);
			}
		}
		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
			struct cam_ed *dev;

			dev = ccb_h->path->device;

			s = splcam();
			/* Return this command's slot to the device queue. */
			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

			ccb_h->path->bus->sim->devq->send_active--;
			ccb_h->path->bus->sim->devq->send_openings++;
			splx(s);

			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			  && (dev->ccbq.dev_active == 0))) {

				xpt_release_devq(ccb_h->path->device,
						 /*run_queue*/TRUE);
			}

			/*
			 * Tag-enable delay: once enough commands have
			 * completed untagged, switch the device to tags.
			 */
			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
			 && (--dev->tag_delay_count == 0))
				xpt_start_tags(ccb_h->path);

			if ((dev->ccbq.queue.entries > 0)
			 && (dev->qfrozen_cnt == 0)
			 && (device_is_send_queued(dev) == 0)) {
				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
							      dev);
			}
		}

		if (ccb_h->status & CAM_RELEASE_SIMQ) {
			xpt_release_simq(ccb_h->path->bus->sim,
					 /*run_queue*/TRUE);
		} else if ((ccb_h->flags & CAM_DEV_QFRZDIS)
		        && (ccb_h->status & CAM_DEV_QFRZN)) {
			xpt_release_devq(ccb_h->path->device,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_DEV_QFRZN;
		} else if (runq) {
			xpt_run_dev_sendq(ccb_h->path->bus);
		}

		/* Call the peripheral driver's callback */
		(*ccb_h->cbfcnp)(ccb_h->path->periph,
				 (union ccb *)ccb_h);

		/* Raise IPL for while test */
		s = splcam();
	}
	splx(s);
}
Cache object: b508a9b4ea68df7efd84764d265d7213
|