1 /* $NetBSD: scsipi_base.c,v 1.104.2.2 2004/09/11 12:53:16 he Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999, 2000, 2002, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.104.2.2 2004/09/11 12:53:16 he Exp $");
42
43 #include "opt_scsi.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/buf.h>
49 #include <sys/uio.h>
50 #include <sys/malloc.h>
51 #include <sys/pool.h>
52 #include <sys/errno.h>
53 #include <sys/device.h>
54 #include <sys/proc.h>
55 #include <sys/kthread.h>
56 #include <sys/hash.h>
57
58 #include <uvm/uvm_extern.h>
59
60 #include <dev/scsipi/scsipi_all.h>
61 #include <dev/scsipi/scsipi_disk.h>
62 #include <dev/scsipi/scsipiconf.h>
63 #include <dev/scsipi/scsipi_base.h>
64
65 #include <dev/scsipi/scsi_all.h>
66 #include <dev/scsipi/scsi_message.h>
67
68 int scsipi_complete __P((struct scsipi_xfer *));
69 void scsipi_request_sense __P((struct scsipi_xfer *));
70 int scsipi_enqueue __P((struct scsipi_xfer *));
71 void scsipi_run_queue __P((struct scsipi_channel *chan));
72
73 void scsipi_completion_thread __P((void *));
74
75 void scsipi_get_tag __P((struct scsipi_xfer *));
76 void scsipi_put_tag __P((struct scsipi_xfer *));
77
78 int scsipi_get_resource __P((struct scsipi_channel *));
79 void scsipi_put_resource __P((struct scsipi_channel *));
80 __inline int scsipi_grow_resources __P((struct scsipi_channel *));
81
82 void scsipi_async_event_max_openings __P((struct scsipi_channel *,
83 struct scsipi_max_openings *));
84 void scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
85 struct scsipi_xfer_mode *));
86 void scsipi_async_event_channel_reset __P((struct scsipi_channel *));
87
88 struct pool scsipi_xfer_pool;
89
90 /*
91 * scsipi_init:
92 *
93 * Called when a scsibus or atapibus is attached to the system
94 * to initialize shared data structures.
95 */
96 void
97 scsipi_init()
98 {
99 static int scsipi_init_done;
100
101 if (scsipi_init_done)
102 return;
103 scsipi_init_done = 1;
104
105 /* Initialize the scsipi_xfer pool. */
106 pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
107 0, 0, "scxspl", NULL);
108 if (pool_prime(&scsipi_xfer_pool,
109 PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
110 printf("WARNING: not enough memory for scsipi_xfer_pool\n");
111 }
112 }
113
114 /*
115 * scsipi_channel_init:
116 *
117 * Initialize a scsipi_channel when it is attached.
118 */
119 int
120 scsipi_channel_init(chan)
121 struct scsipi_channel *chan;
122 {
123 int i;
124
125 /* Initialize shared data. */
126 scsipi_init();
127
128 /* Initialize the queues. */
129 TAILQ_INIT(&chan->chan_queue);
130 TAILQ_INIT(&chan->chan_complete);
131
132 for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
133 LIST_INIT(&chan->chan_periphtab[i]);
134
135 /*
136 * Create the asynchronous completion thread.
137 */
138 kthread_create(scsipi_create_completion_thread, chan);
139 return (0);
140 }
141
142 /*
143 * scsipi_channel_shutdown:
144 *
145 * Shutdown a scsipi_channel.
146 */
147 void
148 scsipi_channel_shutdown(chan)
149 struct scsipi_channel *chan;
150 {
151
152 /*
153 * Shut down the completion thread.
154 */
155 chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
156 wakeup(&chan->chan_complete);
157
158 /*
159 * Now wait for the thread to exit.
160 */
161 while (chan->chan_thread != NULL)
162 (void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
163 }
164
165 static uint32_t
166 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
167 {
168 uint32_t hash;
169
170 hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
171 hash = hash32_buf(&l, sizeof(l), hash);
172
173 return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
174 }
175
176 /*
177 * scsipi_insert_periph:
178 *
179 * Insert a periph into the channel.
180 */
181 void
182 scsipi_insert_periph(chan, periph)
183 struct scsipi_channel *chan;
184 struct scsipi_periph *periph;
185 {
186 uint32_t hash;
187 int s;
188
189 hash = scsipi_chan_periph_hash(periph->periph_target,
190 periph->periph_lun);
191
192 s = splbio();
193 LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
194 splx(s);
195 }
196
197 /*
198 * scsipi_remove_periph:
199 *
200 * Remove a periph from the channel.
201 */
202 void
203 scsipi_remove_periph(chan, periph)
204 struct scsipi_channel *chan;
205 struct scsipi_periph *periph;
206 {
207 int s;
208
209 s = splbio();
210 LIST_REMOVE(periph, periph_hash);
211 splx(s);
212 }
213
214 /*
215 * scsipi_lookup_periph:
216 *
217 * Lookup a periph on the specified channel.
218 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	/*
	 * Reject addresses beyond the channel's limits.
	 * NOTE(review): negative target/lun values are not rejected
	 * here -- presumably callers never pass one; verify.
	 */
	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	/*
	 * Walk the hash chain at splbio().  If no entry matches,
	 * LIST_FOREACH terminates with periph == NULL, which is
	 * exactly what we return.
	 */
	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}
244
245 /*
246 * scsipi_get_resource:
247 *
248 * Allocate a single xfer `resource' from the channel.
249 *
250 * NOTE: Must be called at splbio().
251 */
252 int
253 scsipi_get_resource(chan)
254 struct scsipi_channel *chan;
255 {
256 struct scsipi_adapter *adapt = chan->chan_adapter;
257
258 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
259 if (chan->chan_openings > 0) {
260 chan->chan_openings--;
261 return (1);
262 }
263 return (0);
264 }
265
266 if (adapt->adapt_openings > 0) {
267 adapt->adapt_openings--;
268 return (1);
269 }
270 return (0);
271 }
272
273 /*
274 * scsipi_grow_resources:
275 *
276 * Attempt to grow resources for a channel. If this succeeds,
277 * we allocate one for our caller.
278 *
279 * NOTE: Must be called at splbio().
280 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			/*
			 * No completion thread is running for this
			 * channel, so ask the adapter directly and try
			 * to claim a resource for our caller.
			 */
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue (we freeze it here so nothing runs until
		 * the grow has been handled).
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	/* Adapter cannot grow resources; caller gets nothing. */
	return (0);
}
304
305 /*
306 * scsipi_put_resource:
307 *
308 * Free a single xfer `resource' to the channel.
309 *
310 * NOTE: Must be called at splbio().
311 */
312 void
313 scsipi_put_resource(chan)
314 struct scsipi_channel *chan;
315 {
316 struct scsipi_adapter *adapt = chan->chan_adapter;
317
318 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
319 chan->chan_openings++;
320 else
321 adapt->adapt_openings++;
322 }
323
324 /*
325 * scsipi_get_tag:
326 *
327 * Get a tag ID for the specified xfer.
328 *
329 * NOTE: Must be called at splbio().
330 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	/*
	 * Scan the free-tag bitmap for the first set bit.  ffs()
	 * returns 1-based bit positions, 0 when the word is empty.
	 */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	/*
	 * Claim the tag: clear its bit and encode the tag ID as
	 * (word * 32) + bit.
	 * NOTE(review): without DIAGNOSTIC, exhausting all tag words
	 * leaves bit == 0 here, making the shift below (1 << -1) --
	 * presumably callers guarantee a free tag exists; verify.
	 */
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
366
367 /*
368 * scsipi_put_tag:
369 *
370 * Put the tag ID for the specified xfer back into the pool.
371 *
372 * NOTE: Must be called at splbio().
373 */
374 void
375 scsipi_put_tag(xs)
376 struct scsipi_xfer *xs;
377 {
378 struct scsipi_periph *periph = xs->xs_periph;
379 int word, bit;
380
381 word = xs->xs_tag_id >> 5;
382 bit = xs->xs_tag_id & 0x1f;
383
384 periph->periph_freetags[word] |= (1 << bit);
385 }
386
387 /*
388 * scsipi_get_xs:
389 *
 * Allocate an xfer descriptor and associate it with the
 * specified peripheral. If the peripheral has no more
 * available command openings, we either block waiting for
 * one to become available, or fail.
394 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll (we can't sleep before
	 * the scheduler is running).
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				/* Claim the single recovery slot. */
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		/* Woken by scsipi_put_xs() when an opening frees up. */
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		/* Undo the accounting we did above before failing. */
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		/* Initialize the new xfer and queue it on the periph. */
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}
500
501 /*
502 * scsipi_put_xs:
503 *
 * Release an xfer descriptor, decreasing the outstanding command
 * count for the peripheral. If there is a thread waiting for
 * an opening, wake it up. If not, kick any queued I/O the
 * peripheral may have.
508 *
509 * NOTE: Must be called at splbio().
510 */
511 void
512 scsipi_put_xs(xs)
513 struct scsipi_xfer *xs;
514 {
515 struct scsipi_periph *periph = xs->xs_periph;
516 int flags = xs->xs_control;
517
518 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
519
520 TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
521 pool_put(&scsipi_xfer_pool, xs);
522
523 #ifdef DIAGNOSTIC
524 if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
525 periph->periph_active == 0) {
526 scsipi_printaddr(periph);
527 printf("recovery without a command to recovery for\n");
528 panic("scsipi_put_xs");
529 }
530 #endif
531
532 if (flags & XS_CTL_URGENT) {
533 if ((flags & XS_CTL_REQSENSE) == 0)
534 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
535 } else
536 periph->periph_active--;
537 if (periph->periph_active == 0 &&
538 (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
539 periph->periph_flags &= ~PERIPH_WAITDRAIN;
540 wakeup(&periph->periph_active);
541 }
542
543 if (periph->periph_flags & PERIPH_WAITING) {
544 periph->periph_flags &= ~PERIPH_WAITING;
545 wakeup(periph);
546 } else {
547 if (periph->periph_switch->psw_start != NULL) {
548 SC_DEBUG(periph, SCSIPI_DB2,
549 ("calling private start()\n"));
550 (*periph->periph_switch->psw_start)(periph);
551 }
552 }
553 }
554
555 /*
556 * scsipi_channel_freeze:
557 *
558 * Freeze a channel's xfer queue.
559 */
560 void
561 scsipi_channel_freeze(chan, count)
562 struct scsipi_channel *chan;
563 int count;
564 {
565 int s;
566
567 s = splbio();
568 chan->chan_qfreeze += count;
569 splx(s);
570 }
571
572 /*
573 * scsipi_channel_thaw:
574 *
575 * Thaw a channel's xfer queue.
576 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 *
	 * NOTE(review): chan_qfreeze is re-read here after splx(), so a
	 * concurrent freeze/thaw could race with this check -- confirm
	 * whether the test should be made while still at splbio().
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}
605
606 /*
607 * scsipi_channel_timed_thaw:
608 *
609 * Thaw a channel after some time has expired. This will also
610 * run the channel's queue if the freeze count has reached 0.
611 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{

	/* Callout handler: the argument is the channel to thaw by one. */
	scsipi_channel_thaw((struct scsipi_channel *)arg, 1);
}
620
621 /*
622 * scsipi_periph_freeze:
623 *
624 * Freeze a device's xfer queue.
625 */
626 void
627 scsipi_periph_freeze(periph, count)
628 struct scsipi_periph *periph;
629 int count;
630 {
631 int s;
632
633 s = splbio();
634 periph->periph_qfreeze += count;
635 splx(s);
636 }
637
638 /*
639 * scsipi_periph_thaw:
640 *
641 * Thaw a device's xfer queue.
642 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	/* Unlike the channel thaw, an underflow here is a bug. */
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	/* Fully thawed: wake anyone waiting on this periph. */
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}
665
666 /*
667 * scsipi_periph_timed_thaw:
668 *
669 * Thaw a device after some time has expired.
670 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	/* Cancel any still-pending thaw callout for this periph. */
	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}
698
699 /*
700 * scsipi_wait_drain:
701 *
702 * Wait for a periph's pending xfers to drain.
703 */
704 void
705 scsipi_wait_drain(periph)
706 struct scsipi_periph *periph;
707 {
708 int s;
709
710 s = splbio();
711 while (periph->periph_active != 0) {
712 periph->periph_flags |= PERIPH_WAITDRAIN;
713 (void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
714 }
715 splx(s);
716 }
717
718 /*
719 * scsipi_kill_pending:
720 *
721 * Kill off all pending xfers for a periph.
722 *
723 * NOTE: Must be called at splbio().
724 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	/* Let the bus-specific code abort everything queued for us. */
	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	/* After the kill, the periph's xfer queue must be empty. */
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	/* Wait for the aborted xfers to actually complete. */
	scsipi_wait_drain(periph);
}
737
738 /*
739 * scsipi_print_cdb:
740 * prints a command descriptor block (for debug purpose, error messages,
741 * SCSIPI_VERBOSE, ...)
742 */
743 void
744 scsipi_print_cdb(cmd)
745 struct scsipi_generic *cmd;
746 {
747 int i, j;
748
749 printf("0x%02x", cmd->opcode);
750
751 switch (CDB_GROUPID(cmd->opcode)) {
752 case CDB_GROUPID_0:
753 j = CDB_GROUP0;
754 break;
755 case CDB_GROUPID_1:
756 j = CDB_GROUP1;
757 break;
758 case CDB_GROUPID_2:
759 j = CDB_GROUP2;
760 break;
761 case CDB_GROUPID_3:
762 j = CDB_GROUP3;
763 break;
764 case CDB_GROUPID_4:
765 j = CDB_GROUP4;
766 break;
767 case CDB_GROUPID_5:
768 j = CDB_GROUP5;
769 break;
770 case CDB_GROUPID_6:
771 j = CDB_GROUP6;
772 break;
773 case CDB_GROUPID_7:
774 j = CDB_GROUP7;
775 break;
776 default:
777 j = 0;
778 }
779 if (j == 0)
780 j = sizeof (cmd->bytes);
781 for (i = 0; i < j-1; i++) /* already done the opcode */
782 printf(" %02x", cmd->bytes[i]);
783 }
784
785 /*
786 * scsipi_interpret_sense:
787 *
788 * Look at the returned sense and act on the error, determining
789 * the unix error number to pass back. (0 = report no error)
790 *
 * NOTE: If we return ERESTART, we are expected to have
 * thawed the device!
793 *
794 * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
795 */
796 int
797 scsipi_interpret_sense(xs)
798 struct scsipi_xfer *xs;
799 {
800 struct scsipi_sense_data *sense;
801 struct scsipi_periph *periph = xs->xs_periph;
802 u_int8_t key;
803 int error;
804 #ifndef SCSIVERBOSE
805 u_int32_t info;
806 static char *error_mes[] = {
807 "soft error (corrected)",
808 "not ready", "medium error",
809 "non-media hardware failure", "illegal request",
810 "unit attention", "readonly device",
811 "no data found", "vendor unique",
812 "copy aborted", "command aborted",
813 "search returned equal", "volume overflow",
814 "verify miscompare", "unknown error key"
815 };
816 #endif
817
818 sense = &xs->sense.scsi_sense;
819 #ifdef SCSIPI_DEBUG
820 if (periph->periph_flags & SCSIPI_DB1) {
821 int count;
822 scsipi_printaddr(periph);
823 printf(" sense debug information:\n");
824 printf("\tcode 0x%x valid 0x%x\n",
825 sense->error_code & SSD_ERRCODE,
826 sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
827 printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
828 sense->segment,
829 sense->flags & SSD_KEY,
830 sense->flags & SSD_ILI ? 1 : 0,
831 sense->flags & SSD_EOM ? 1 : 0,
832 sense->flags & SSD_FILEMARK ? 1 : 0);
833 printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
834 "extra bytes\n",
835 sense->info[0],
836 sense->info[1],
837 sense->info[2],
838 sense->info[3],
839 sense->extra_len);
840 printf("\textra: ");
841 for (count = 0; count < ADD_BYTES_LIM(sense); count++)
842 printf("0x%x ", sense->cmd_spec_info[count]);
843 printf("\n");
844 }
845 #endif
846
847 /*
848 * If the periph has it's own error handler, call it first.
849 * If it returns a legit error value, return that, otherwise
850 * it wants us to continue with normal error processing.
851 */
852 if (periph->periph_switch->psw_error != NULL) {
853 SC_DEBUG(periph, SCSIPI_DB2,
854 ("calling private err_handler()\n"));
855 error = (*periph->periph_switch->psw_error)(xs);
856 if (error != EJUSTRETURN)
857 return (error);
858 }
859 /* otherwise use the default */
860 switch (sense->error_code & SSD_ERRCODE) {
861
862 /*
863 * Old SCSI-1 and SASI devices respond with
864 * codes other than 70.
865 */
866 case 0x00: /* no error (command completed OK) */
867 return (0);
868 case 0x04: /* drive not ready after it was selected */
869 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
870 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
871 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
872 return (0);
873 /* XXX - display some sort of error here? */
874 return (EIO);
875 case 0x20: /* invalid command */
876 if ((xs->xs_control &
877 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
878 return (0);
879 return (EINVAL);
880 case 0x25: /* invalid LUN (Adaptec ACB-4000) */
881 return (EACCES);
882
883 /*
884 * If it's code 70, use the extended stuff and
885 * interpret the key
886 */
887 case 0x71: /* delayed error */
888 scsipi_printaddr(periph);
889 key = sense->flags & SSD_KEY;
890 printf(" DEFERRED ERROR, key = 0x%x\n", key);
891 /* FALLTHROUGH */
892 case 0x70:
893 #ifndef SCSIVERBOSE
894 if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
895 info = _4btol(sense->info);
896 else
897 info = 0;
898 #endif
899 key = sense->flags & SSD_KEY;
900
901 switch (key) {
902 case SKEY_NO_SENSE:
903 case SKEY_RECOVERED_ERROR:
904 if (xs->resid == xs->datalen && xs->datalen) {
905 /*
906 * Why is this here?
907 */
908 xs->resid = 0; /* not short read */
909 }
910 case SKEY_EQUAL:
911 error = 0;
912 break;
913 case SKEY_NOT_READY:
914 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
915 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
916 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
917 return (0);
918 if (sense->add_sense_code == 0x3A) {
919 error = ENODEV; /* Medium not present */
920 if (xs->xs_control & XS_CTL_SILENT_NODEV)
921 return (error);
922 } else
923 error = EIO;
924 if ((xs->xs_control & XS_CTL_SILENT) != 0)
925 return (error);
926 break;
927 case SKEY_ILLEGAL_REQUEST:
928 if ((xs->xs_control &
929 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
930 return (0);
931 /*
932 * Handle the case where a device reports
933 * Logical Unit Not Supported during discovery.
934 */
935 if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
936 sense->add_sense_code == 0x25 &&
937 sense->add_sense_code_qual == 0x00)
938 return (EINVAL);
939 if ((xs->xs_control & XS_CTL_SILENT) != 0)
940 return (EIO);
941 error = EINVAL;
942 break;
943 case SKEY_UNIT_ATTENTION:
944 if (sense->add_sense_code == 0x29 &&
945 sense->add_sense_code_qual == 0x00) {
946 /* device or bus reset */
947 return (ERESTART);
948 }
949 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
950 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
951 if ((xs->xs_control &
952 XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
953 /* XXX Should reupload any transient state. */
954 (periph->periph_flags &
955 PERIPH_REMOVABLE) == 0) {
956 return (ERESTART);
957 }
958 if ((xs->xs_control & XS_CTL_SILENT) != 0)
959 return (EIO);
960 error = EIO;
961 break;
962 case SKEY_WRITE_PROTECT:
963 error = EROFS;
964 break;
965 case SKEY_BLANK_CHECK:
966 error = 0;
967 break;
968 case SKEY_ABORTED_COMMAND:
969 if (xs->xs_retries != 0) {
970 xs->xs_retries--;
971 error = ERESTART;
972 } else
973 error = EIO;
974 break;
975 case SKEY_VOLUME_OVERFLOW:
976 error = ENOSPC;
977 break;
978 default:
979 error = EIO;
980 break;
981 }
982
983 #ifdef SCSIVERBOSE
984 if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
985 scsipi_print_sense(xs, 0);
986 #else
987 if (key) {
988 scsipi_printaddr(periph);
989 printf("%s", error_mes[key - 1]);
990 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
991 switch (key) {
992 case SKEY_NOT_READY:
993 case SKEY_ILLEGAL_REQUEST:
994 case SKEY_UNIT_ATTENTION:
995 case SKEY_WRITE_PROTECT:
996 break;
997 case SKEY_BLANK_CHECK:
998 printf(", requested size: %d (decimal)",
999 info);
1000 break;
1001 case SKEY_ABORTED_COMMAND:
1002 if (xs->xs_retries)
1003 printf(", retrying");
1004 printf(", cmd 0x%x, info 0x%x",
1005 xs->cmd->opcode, info);
1006 break;
1007 default:
1008 printf(", info = %d (decimal)", info);
1009 }
1010 }
1011 if (sense->extra_len != 0) {
1012 int n;
1013 printf(", data =");
1014 for (n = 0; n < sense->extra_len; n++)
1015 printf(" %02x",
1016 sense->cmd_spec_info[n]);
1017 }
1018 printf("\n");
1019 }
1020 #endif
1021 return (error);
1022
1023 /*
1024 * Some other code, just report it
1025 */
1026 default:
1027 #if defined(SCSIDEBUG) || defined(DEBUG)
1028 {
1029 static char *uc = "undecodable sense error";
1030 int i;
1031 u_int8_t *cptr = (u_int8_t *) sense;
1032 scsipi_printaddr(periph);
1033 if (xs->cmd == &xs->cmdstore) {
1034 printf("%s for opcode 0x%x, data=",
1035 uc, xs->cmdstore.opcode);
1036 } else {
1037 printf("%s, data=", uc);
1038 }
1039 for (i = 0; i < sizeof (sense); i++)
1040 printf(" 0x%02x", *(cptr++) & 0xff);
1041 printf("\n");
1042 }
1043 #else
1044 scsipi_printaddr(periph);
1045 printf("Sense Error Code 0x%x",
1046 sense->error_code & SSD_ERRCODE);
1047 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
1048 struct scsipi_sense_data_unextended *usense =
1049 (struct scsipi_sense_data_unextended *)sense;
1050 printf(" at block no. %d (decimal)",
1051 _3btol(usense->block));
1052 }
1053 printf("\n");
1054 #endif
1055 return (EIO);
1056 }
1057 }
1058
1059 /*
1060 * scsipi_size:
1061 *
1062 * Find out from the device what its capacity is.
1063 */
1064 u_int64_t
1065 scsipi_size(periph, flags)
1066 struct scsipi_periph *periph;
1067 int flags;
1068 {
1069 struct scsipi_read_cap_data rdcap;
1070 struct scsipi_read_capacity scsipi_cmd;
1071
1072 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1073 scsipi_cmd.opcode = READ_CAPACITY;
1074
1075 /*
1076 * If the command works, interpret the result as a 4 byte
1077 * number of blocks
1078 */
1079 if (scsipi_command(periph, NULL, (struct scsipi_generic *)&scsipi_cmd,
1080 sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
1081 SCSIPIRETRIES, 20000, NULL,
1082 flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
1083 return (0);
1084
1085 return (_4btol(rdcap.addr) + 1);
1086 }
1087
1088 /*
1089 * scsipi_test_unit_ready:
1090 *
1091 * Issue a `test unit ready' request.
1092 */
1093 int
1094 scsipi_test_unit_ready(periph, flags)
1095 struct scsipi_periph *periph;
1096 int flags;
1097 {
1098 int retries;
1099 struct scsipi_test_unit_ready scsipi_cmd;
1100
1101 /* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
1102 if (periph->periph_quirks & PQUIRK_NOTUR)
1103 return (0);
1104
1105 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1106 scsipi_cmd.opcode = TEST_UNIT_READY;
1107
1108 if (flags & XS_CTL_DISCOVERY)
1109 retries = 0;
1110 else
1111 retries = SCSIPIRETRIES;
1112
1113 return (scsipi_command(periph, NULL,
1114 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1115 0, 0, retries, 10000, NULL, flags));
1116 }
1117
1118 /*
1119 * scsipi_inquire:
1120 *
1121 * Ask the device about itself.
1122 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	int retries;
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;

	/* Don't retry during device discovery. */
	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, NULL,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2,
	    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	if (!error && inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		/* Device says it has more: re-issue for the SCSI-3 length. */
		scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph, NULL,
		    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
		    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3,
		    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
			 SID_QUAL_LU_PRESENT :
			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		/* 28 bytes spans the vendor/product/revision fields --
		 * presumably contiguous in the struct; verify. */
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}
1208
1209 /*
1210 * scsipi_prevent:
1211 *
1212 * Prevent or allow the user to remove the media
1213 */
1214 int
1215 scsipi_prevent(periph, type, flags)
1216 struct scsipi_periph *periph;
1217 int type, flags;
1218 {
1219 struct scsipi_prevent scsipi_cmd;
1220
1221 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1222 scsipi_cmd.opcode = PREVENT_ALLOW;
1223 scsipi_cmd.how = type;
1224
1225 return (scsipi_command(periph, NULL,
1226 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1227 0, 0, SCSIPIRETRIES, 5000, NULL, flags));
1228 }
1229
1230 /*
1231 * scsipi_start:
1232 *
1233 * Send a START UNIT.
1234 */
1235 int
1236 scsipi_start(periph, type, flags)
1237 struct scsipi_periph *periph;
1238 int type, flags;
1239 {
1240 struct scsipi_start_stop scsipi_cmd;
1241
1242 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1243 scsipi_cmd.opcode = START_STOP;
1244 scsipi_cmd.byte2 = 0x00;
1245 scsipi_cmd.how = type;
1246
1247 return (scsipi_command(periph, NULL,
1248 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1249 0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
1250 NULL, flags));
1251 }
1252
1253 /*
1254 * scsipi_mode_sense, scsipi_mode_sense_big:
1255 * get a sense page from a device
1256 */
1257
1258 int
1259 scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
1260 struct scsipi_periph *periph;
1261 int byte2, page, len, flags, retries, timeout;
1262 struct scsipi_mode_header *data;
1263 {
1264 struct scsipi_mode_sense scsipi_cmd;
1265 int error;
1266
1267 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1268 scsipi_cmd.opcode = MODE_SENSE;
1269 scsipi_cmd.byte2 = byte2;
1270 scsipi_cmd.page = page;
1271 scsipi_cmd.length = len & 0xff;
1272 error = scsipi_command(periph, NULL,
1273 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1274 (void *)data, len, retries, timeout, NULL,
1275 flags | XS_CTL_DATA_IN);
1276 SC_DEBUG(periph, SCSIPI_DB2,
1277 ("scsipi_mode_sense: error=%d\n", error));
1278 return (error);
1279 }
1280
1281 int
1282 scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
1283 struct scsipi_periph *periph;
1284 int byte2, page, len, flags, retries, timeout;
1285 struct scsipi_mode_header_big *data;
1286 {
1287 struct scsipi_mode_sense_big scsipi_cmd;
1288 int error;
1289
1290 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1291 scsipi_cmd.opcode = MODE_SENSE_BIG;
1292 scsipi_cmd.byte2 = byte2;
1293 scsipi_cmd.page = page;
1294 _lto2b(len, scsipi_cmd.length);
1295 error = scsipi_command(periph, NULL,
1296 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1297 (void *)data, len, retries, timeout, NULL,
1298 flags | XS_CTL_DATA_IN);
1299 SC_DEBUG(periph, SCSIPI_DB2,
1300 ("scsipi_mode_sense_big: error=%d\n", error));
1301 return (error);
1302 }
1303
1304 int
1305 scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
1306 struct scsipi_periph *periph;
1307 int byte2, len, flags, retries, timeout;
1308 struct scsipi_mode_header *data;
1309 {
1310 struct scsipi_mode_select scsipi_cmd;
1311 int error;
1312
1313 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1314 scsipi_cmd.opcode = MODE_SELECT;
1315 scsipi_cmd.byte2 = byte2;
1316 scsipi_cmd.length = len & 0xff;
1317 error = scsipi_command(periph, NULL,
1318 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1319 (void *)data, len, retries, timeout, NULL,
1320 flags | XS_CTL_DATA_OUT);
1321 SC_DEBUG(periph, SCSIPI_DB2,
1322 ("scsipi_mode_select: error=%d\n", error));
1323 return (error);
1324 }
1325
1326 int
1327 scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
1328 struct scsipi_periph *periph;
1329 int byte2, len, flags, retries, timeout;
1330 struct scsipi_mode_header_big *data;
1331 {
1332 struct scsipi_mode_select_big scsipi_cmd;
1333 int error;
1334
1335 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1336 scsipi_cmd.opcode = MODE_SELECT_BIG;
1337 scsipi_cmd.byte2 = byte2;
1338 _lto2b(len, scsipi_cmd.length);
1339 error = scsipi_command(periph, NULL,
1340 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1341 (void *)data, len, retries, timeout, NULL,
1342 flags | XS_CTL_DATA_OUT);
1343 SC_DEBUG(periph, SCSIPI_DB2,
1344 ("scsipi_mode_select: error=%d\n", error));
1345 return (error);
1346 }
1347
/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.  It releases the adapter resource and tag,
 *	marks the xfer done, and routes it to the correct completion
 *	path: polled (return to caller), synchronous (wakeup the waiter),
 *	error-free async (complete inline), or failed async (hand off to
 *	the channel's completion thread).  May run in interrupt context.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.  Otherwise clear the
	 * per-periph "untagged command outstanding" marker set by
	 * scsipi_run_queue().
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	/* ASYNC and POLL are mutually exclusive; catch violators. */
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.  (Thawed later by scsipi_complete().)
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is waked up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because pollings jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		/* Wake the thread sleeping in scsipi_execute_xs(). */
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
1461
/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * The REQUEST SENSE itself got a CHECK CONDITION;
			 * give up rather than recurse.
			 */
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		/* Issue REQUEST SENSE; this rewrites xs->error. */
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		/* Undo the freeze taken in scsipi_done() on error. */
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	/* Map the transport-level result onto an errno / ERESTART. */
	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		/* Bus-type specific sense interpretation (SCSI vs. ATAPI). */
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.  Busy-wait when
			 * polling or when the completion thread isn't up
			 * yet; otherwise freeze the periph and let a
			 * callout thaw it after one tick interval.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
		/* Re-enqueue failed; fall through and finish with `error'. */
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/*
	 * Set buffer fields in case the periph
	 * switch done func uses them
	 */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
	}

	/* Let the periph driver see the completed xfer. */
	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);

	if (bp)
		biodone(bp);

	/* Async xfers own no waiting thread; free them here. */
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
1718
/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().  On return, xs->error has been rewritten to
 * reflect the outcome of the REQUEST SENSE (XS_SENSE on success).
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	/*
	 * URGENT puts the sense command at the head of the queue;
	 * THAW_PERIPH/FREEZE_PERIPH let it run despite the freeze taken
	 * in scsipi_done() and re-freeze when it completes.
	 */
	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	/* Read the sense data directly into the original xfer. */
	error = scsipi_command(periph, NULL,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	/* Map the REQUEST SENSE result back onto the original xfer. */
	switch(error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		 /* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		 /* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}
1779
/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.  Returns 0 on success, or EAGAIN
 *	when a polled xfer cannot be accepted because other jobs are
 *	already queued.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	/* Thaw requested by the issuer (e.g. scsipi_request_sense()). */
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}
1845
1846 /*
1847 * scsipi_run_queue:
1848 *
1849 * Start as many xfers as possible running on the channel.
1850 */
1851 void
1852 scsipi_run_queue(chan)
1853 struct scsipi_channel *chan;
1854 {
1855 struct scsipi_xfer *xs;
1856 struct scsipi_periph *periph;
1857 int s;
1858
1859 for (;;) {
1860 s = splbio();
1861
1862 /*
1863 * If the channel is frozen, we can't do any work right
1864 * now.
1865 */
1866 if (chan->chan_qfreeze != 0) {
1867 splx(s);
1868 return;
1869 }
1870
1871 /*
1872 * Look for work to do, and make sure we can do it.
1873 */
1874 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1875 xs = TAILQ_NEXT(xs, channel_q)) {
1876 periph = xs->xs_periph;
1877
1878 if ((periph->periph_sent >= periph->periph_openings) ||
1879 periph->periph_qfreeze != 0 ||
1880 (periph->periph_flags & PERIPH_UNTAG) != 0)
1881 continue;
1882
1883 if ((periph->periph_flags &
1884 (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1885 (xs->xs_control & XS_CTL_URGENT) == 0)
1886 continue;
1887
1888 /*
1889 * We can issue this xfer!
1890 */
1891 goto got_one;
1892 }
1893
1894 /*
1895 * Can't find any work to do right now.
1896 */
1897 splx(s);
1898 return;
1899
1900 got_one:
1901 /*
1902 * Have an xfer to run. Allocate a resource from
1903 * the adapter to run it. If we can't allocate that
1904 * resource, we don't dequeue the xfer.
1905 */
1906 if (scsipi_get_resource(chan) == 0) {
1907 /*
1908 * Adapter is out of resources. If the adapter
1909 * supports it, attempt to grow them.
1910 */
1911 if (scsipi_grow_resources(chan) == 0) {
1912 /*
1913 * Wasn't able to grow resources,
1914 * nothing more we can do.
1915 */
1916 if (xs->xs_control & XS_CTL_POLL) {
1917 scsipi_printaddr(xs->xs_periph);
1918 printf("polling command but no "
1919 "adapter resources");
1920 /* We'll panic shortly... */
1921 }
1922 splx(s);
1923
1924 /*
1925 * XXX: We should be able to note that
1926 * XXX: that resources are needed here!
1927 */
1928 return;
1929 }
1930 /*
1931 * scsipi_grow_resources() allocated the resource
1932 * for us.
1933 */
1934 }
1935
1936 /*
1937 * We have a resource to run this xfer, do it!
1938 */
1939 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1940
1941 /*
1942 * If the command is to be tagged, allocate a tag ID
1943 * for it.
1944 */
1945 if (XS_CTL_TAGTYPE(xs) != 0)
1946 scsipi_get_tag(xs);
1947 else
1948 periph->periph_flags |= PERIPH_UNTAG;
1949 periph->periph_sent++;
1950 splx(s);
1951
1952 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1953 }
1954 #ifdef DIAGNOSTIC
1955 panic("scsipi_run_queue: impossible");
1956 #endif
1957 }
1958
/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 *	Returns EJUSTRETURN for async xfers (completion handled elsewhere),
 *	otherwise 0 or an errno from scsipi_complete().
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	/* Reset completion state for this (possibly re-used) xfer. */
	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;		/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 * scsipi_done() will wakeup(xs) when XS_STS_DONE is set.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled
	 */
	if (oasync)
		error = EJUSTRETURN;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
2136
/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 *
 *	Thread-request flags (chan_tflags) are serviced before queued
 *	xfers on each iteration; SCSIPI_CHANT_SHUTDOWN terminates the
 *	thread.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	/* Run the adapter's channel-init hook, if any, in thread context. */
	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags  == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}
2221
2222 /*
2223 * scsipi_create_completion_thread:
2224 *
2225 * Callback to actually create the completion thread.
2226 */
2227 void
2228 scsipi_create_completion_thread(arg)
2229 void *arg;
2230 {
2231 struct scsipi_channel *chan = arg;
2232 struct scsipi_adapter *adapt = chan->chan_adapter;
2233
2234 if (kthread_create1(scsipi_completion_thread, chan,
2235 &chan->chan_thread, "%s", chan->chan_name)) {
2236 printf("%s: unable to create completion thread for "
2237 "channel %d\n", adapt->adapt_dev->dv_xname,
2238 chan->chan_channel);
2239 panic("scsipi_create_completion_thread");
2240 }
2241 }
2242
2243 /*
2244 * scsipi_thread_call_callback:
2245 *
2246 * request to call a callback from the completion thread
2247 */
2248 int
2249 scsipi_thread_call_callback(chan, callback, arg)
2250 struct scsipi_channel *chan;
2251 void (*callback) __P((struct scsipi_channel *, void *));
2252 void *arg;
2253 {
2254 int s;
2255
2256 s = splbio();
2257 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2258 /* kernel thread doesn't exist yet */
2259 splx(s);
2260 return ESRCH;
2261 }
2262 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2263 splx(s);
2264 return EBUSY;
2265 }
2266 scsipi_channel_freeze(chan, 1);
2267 chan->chan_callback = callback;
2268 chan->chan_callback_arg = arg;
2269 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2270 wakeup(&chan->chan_complete);
2271 splx(s);
2272 return(0);
2273 }
2274
2275 /*
2276 * scsipi_async_event:
2277 *
2278 * Handle an asynchronous event from an adapter.
2279 */
2280 void
2281 scsipi_async_event(chan, event, arg)
2282 struct scsipi_channel *chan;
2283 scsipi_async_event_t event;
2284 void *arg;
2285 {
2286 int s;
2287
2288 s = splbio();
2289 switch (event) {
2290 case ASYNC_EVENT_MAX_OPENINGS:
2291 scsipi_async_event_max_openings(chan,
2292 (struct scsipi_max_openings *)arg);
2293 break;
2294
2295 case ASYNC_EVENT_XFER_MODE:
2296 scsipi_async_event_xfer_mode(chan,
2297 (struct scsipi_xfer_mode *)arg);
2298 break;
2299 case ASYNC_EVENT_RESET:
2300 scsipi_async_event_channel_reset(chan);
2301 break;
2302 }
2303 splx(s);
2304 }
2305
2306 /*
2307 * scsipi_print_xfer_mode:
2308 *
2309 * Print a periph's capabilities.
2310 */
2311 void
2312 scsipi_print_xfer_mode(periph)
2313 struct scsipi_periph *periph;
2314 {
2315 int period, freq, speed, mbs;
2316
2317 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2318 return;
2319
2320 aprint_normal("%s: ", periph->periph_dev->dv_xname);
2321 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2322 period = scsipi_sync_factor_to_period(periph->periph_period);
2323 aprint_normal("sync (%d.%02dns offset %d)",
2324 period / 100, period % 100, periph->periph_offset);
2325 } else
2326 aprint_normal("async");
2327
2328 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2329 aprint_normal(", 32-bit");
2330 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2331 aprint_normal(", 16-bit");
2332 else
2333 aprint_normal(", 8-bit");
2334
2335 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2336 freq = scsipi_sync_factor_to_freq(periph->periph_period);
2337 speed = freq;
2338 if (periph->periph_mode & PERIPH_CAP_WIDE32)
2339 speed *= 4;
2340 else if (periph->periph_mode &
2341 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2342 speed *= 2;
2343 mbs = speed / 1000;
2344 if (mbs > 0)
2345 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2346 else
2347 aprint_normal(" (%dKB/s)", speed % 1000);
2348 }
2349
2350 aprint_normal(" transfers");
2351
2352 if (periph->periph_mode & PERIPH_CAP_TQING)
2353 aprint_normal(", tagged queueing");
2354
2355 aprint_normal("\n");
2356 }
2357
2358 /*
2359 * scsipi_async_event_max_openings:
2360 *
2361 * Update the maximum number of outstanding commands a
2362 * device may have.
2363 */
2364 void
2365 scsipi_async_event_max_openings(chan, mo)
2366 struct scsipi_channel *chan;
2367 struct scsipi_max_openings *mo;
2368 {
2369 struct scsipi_periph *periph;
2370 int minlun, maxlun;
2371
2372 if (mo->mo_lun == -1) {
2373 /*
2374 * Wildcarded; apply it to all LUNs.
2375 */
2376 minlun = 0;
2377 maxlun = chan->chan_nluns - 1;
2378 } else
2379 minlun = maxlun = mo->mo_lun;
2380
2381 /* XXX This could really suck with a large LUN space. */
2382 for (; minlun <= maxlun; minlun++) {
2383 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2384 if (periph == NULL)
2385 continue;
2386
2387 if (mo->mo_openings < periph->periph_openings)
2388 periph->periph_openings = mo->mo_openings;
2389 else if (mo->mo_openings > periph->periph_openings &&
2390 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2391 periph->periph_openings = mo->mo_openings;
2392 }
2393 }
2394
2395 /*
2396 * scsipi_async_event_xfer_mode:
2397 *
2398 * Update the xfer mode for all periphs sharing the
2399 * specified I_T Nexus.
2400 */
2401 void
2402 scsipi_async_event_xfer_mode(chan, xm)
2403 struct scsipi_channel *chan;
2404 struct scsipi_xfer_mode *xm;
2405 {
2406 struct scsipi_periph *periph;
2407 int lun, announce, mode, period, offset;
2408
2409 for (lun = 0; lun < chan->chan_nluns; lun++) {
2410 periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2411 if (periph == NULL)
2412 continue;
2413 announce = 0;
2414
2415 /*
2416 * Clamp the xfer mode down to this periph's capabilities.
2417 */
2418 mode = xm->xm_mode & periph->periph_cap;
2419 if (mode & PERIPH_CAP_SYNC) {
2420 period = xm->xm_period;
2421 offset = xm->xm_offset;
2422 } else {
2423 period = 0;
2424 offset = 0;
2425 }
2426
2427 /*
2428 * If we do not have a valid xfer mode yet, or the parameters
2429 * are different, announce them.
2430 */
2431 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2432 periph->periph_mode != mode ||
2433 periph->periph_period != period ||
2434 periph->periph_offset != offset)
2435 announce = 1;
2436
2437 periph->periph_mode = mode;
2438 periph->periph_period = period;
2439 periph->periph_offset = offset;
2440 periph->periph_flags |= PERIPH_MODE_VALID;
2441
2442 if (announce)
2443 scsipi_print_xfer_mode(periph);
2444 }
2445 }
2446
2447 /*
2448 * scsipi_set_xfer_mode:
2449 *
2450 * Set the xfer mode for the specified I_T Nexus.
2451 */
2452 void
2453 scsipi_set_xfer_mode(chan, target, immed)
2454 struct scsipi_channel *chan;
2455 int target, immed;
2456 {
2457 struct scsipi_xfer_mode xm;
2458 struct scsipi_periph *itperiph;
2459 int lun, s;
2460
2461 /*
2462 * Go to the minimal xfer mode.
2463 */
2464 xm.xm_target = target;
2465 xm.xm_mode = 0;
2466 xm.xm_period = 0; /* ignored */
2467 xm.xm_offset = 0; /* ignored */
2468
2469 /*
2470 * Find the first LUN we know about on this I_T Nexus.
2471 */
2472 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2473 itperiph = scsipi_lookup_periph(chan, target, lun);
2474 if (itperiph != NULL)
2475 break;
2476 }
2477 if (itperiph != NULL) {
2478 xm.xm_mode = itperiph->periph_cap;
2479 /*
2480 * Now issue the request to the adapter.
2481 */
2482 s = splbio();
2483 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2484 splx(s);
2485 /*
2486 * If we want this to happen immediately, issue a dummy
2487 * command, since most adapters can't really negotiate unless
2488 * they're executing a job.
2489 */
2490 if (immed != 0) {
2491 (void) scsipi_test_unit_ready(itperiph,
2492 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2493 XS_CTL_IGNORE_NOT_READY |
2494 XS_CTL_IGNORE_MEDIA_CHANGE);
2495 }
2496 }
2497 }
2498
2499 /*
2500 * scsipi_channel_reset:
2501 *
2502 * handle scsi bus reset
2503 * called at splbio
2504 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset. Also mark as reset pending REQUEST_SENSE
	 * commands; as the sense is not available any more.
	 * can't call scsipi_done() from here, as the command has not been
	 * sent to the adapter yet (this would corrupt accounting).
	 */

	/*
	 * Fail queued REQUEST_SENSE xfers with XS_RESET; async ones are
	 * moved to the complete queue for the completion thread to retire.
	 */
	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		/* Grab the successor first; xs may be unlinked below. */
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	/* Let the completion thread notice the new entries. */
	wakeup(&chan->chan_complete);
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph(chan, target, lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}
2545
2546 /*
2547 * scsipi_target_detach:
2548 *
2549 * detach all periph associated with a I_T
2550 * must be called from valid thread context
2551 */
2552 int
2553 scsipi_target_detach(chan, target, lun, flags)
2554 struct scsipi_channel *chan;
2555 int target, lun;
2556 int flags;
2557 {
2558 struct scsipi_periph *periph;
2559 int ctarget, mintarget, maxtarget;
2560 int clun, minlun, maxlun;
2561 int error;
2562
2563 if (target == -1) {
2564 mintarget = 0;
2565 maxtarget = chan->chan_ntargets;
2566 } else {
2567 if (target == chan->chan_id)
2568 return EINVAL;
2569 if (target < 0 || target >= chan->chan_ntargets)
2570 return EINVAL;
2571 mintarget = target;
2572 maxtarget = target + 1;
2573 }
2574
2575 if (lun == -1) {
2576 minlun = 0;
2577 maxlun = chan->chan_nluns;
2578 } else {
2579 if (lun < 0 || lun >= chan->chan_nluns)
2580 return EINVAL;
2581 minlun = lun;
2582 maxlun = lun + 1;
2583 }
2584
2585 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2586 if (ctarget == chan->chan_id)
2587 continue;
2588
2589 for (clun = minlun; clun < maxlun; clun++) {
2590 periph = scsipi_lookup_periph(chan, ctarget, clun);
2591 if (periph == NULL)
2592 continue;
2593 error = config_detach(periph->periph_dev, flags);
2594 if (error)
2595 return (error);
2596 scsipi_remove_periph(chan, periph);
2597 free(periph, M_DEVBUF);
2598 }
2599 }
2600 return(0);
2601 }
2602
2603 /*
2604 * scsipi_adapter_addref:
2605 *
2606 * Add a reference to the adapter pointed to by the provided
2607 * link, enabling the adapter if necessary.
2608 */
2609 int
2610 scsipi_adapter_addref(adapt)
2611 struct scsipi_adapter *adapt;
2612 {
2613 int s, error = 0;
2614
2615 s = splbio();
2616 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2617 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2618 if (error)
2619 adapt->adapt_refcnt--;
2620 }
2621 splx(s);
2622 return (error);
2623 }
2624
2625 /*
2626 * scsipi_adapter_delref:
2627 *
2628 * Delete a reference to the adapter pointed to by the provided
2629 * link, disabling the adapter if possible.
2630 */
2631 void
2632 scsipi_adapter_delref(adapt)
2633 struct scsipi_adapter *adapt;
2634 {
2635 int s;
2636
2637 s = splbio();
2638 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2639 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2640 splx(s);
2641 }
2642
/*
 * Table mapping SCSI synchronous transfer period factors to transfer
 * periods (in units of ns * 100) for the fast transfer rates, which
 * do not follow the simple factor * 4ns rule.
 */
struct scsipi_syncparam {
	int ss_factor;
	int ss_period;	/* ns * 100 */
} scsipi_syncparams[] = {
	{ 0x08,  625 },		/* FAST-160 (Ultra320) */
	{ 0x09, 1250 },		/* FAST-80 (Ultra160) */
	{ 0x0a, 2500 },		/* FAST-40 40MHz (Ultra2) */
	{ 0x0b, 3030 },		/* FAST-40 33MHz (Ultra2) */
	{ 0x0c, 5000 },		/* FAST-20 (Ultra) */
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

/*
 * Convert a transfer period (ns * 100) to the smallest sync factor
 * that can satisfy it; falls back to the factor * 4ns encoding for
 * periods slower than the table covers.
 */
int
scsipi_sync_period_to_factor(period)
	int period;		/* ns * 100 */
{
	const struct scsipi_syncparam *sp;

	for (sp = scsipi_syncparams;
	     sp < &scsipi_syncparams[scsipi_nsyncparams]; sp++) {
		if (period <= sp->ss_period)
			return (sp->ss_factor);
	}

	return ((period / 100) / 4);
}

/*
 * Convert a sync factor to its transfer period (ns * 100); factors
 * not in the table use the factor * 4ns encoding.
 */
int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	const struct scsipi_syncparam *sp;

	for (sp = scsipi_syncparams;
	     sp < &scsipi_syncparams[scsipi_nsyncparams]; sp++) {
		if (factor == sp->ss_factor)
			return (sp->ss_period);
	}

	return ((factor * 4) * 100);
}

/*
 * Convert a sync factor to a transfer frequency in kHz.
 * NOTE(review): a factor of 0 outside the table would divide by
 * zero; callers appear to pass only negotiated (nonzero) factors --
 * confirm before relying on this with raw input.
 */
int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	const struct scsipi_syncparam *sp;

	for (sp = scsipi_syncparams;
	     sp < &scsipi_syncparams[scsipi_nsyncparams]; sp++) {
		if (factor == sp->ss_factor)
			return (100000000 / sp->ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}
2697
2698 #ifdef SCSIPI_DEBUG
2699 /*
2700 * Given a scsipi_xfer, dump the request, in all it's glory
2701 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	/* Dump each field of the xfer in "name(value)" form. */
	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	/* Follow with the CDB bytes and a sample of the data. */
	show_scsipi_cmd(xs);
}
2722
2723 void
2724 show_scsipi_cmd(xs)
2725 struct scsipi_xfer *xs;
2726 {
2727 u_char *b = (u_char *) xs->cmd;
2728 int i = 0;
2729
2730 scsipi_printaddr(xs->xs_periph);
2731 printf(" command: ");
2732
2733 if ((xs->xs_control & XS_CTL_RESET) == 0) {
2734 while (i < xs->cmdlen) {
2735 if (i)
2736 printf(",");
2737 printf("0x%x", b[i++]);
2738 }
2739 printf("-[%d bytes]\n", xs->datalen);
2740 if (xs->datalen)
2741 show_mem(xs->data, min(64, xs->datalen));
2742 } else
2743 printf("-RESET-\n");
2744 }
2745
2746 void
2747 show_mem(address, num)
2748 u_char *address;
2749 int num;
2750 {
2751 int x;
2752
2753 printf("------------------------------");
2754 for (x = 0; x < num; x++) {
2755 if ((x % 16) == 0)
2756 printf("\n%03d: ", x);
2757 printf("%02x ", *address++);
2758 }
2759 printf("\n------------------------------\n");
2760 }
2761 #endif /* SCSIPI_DEBUG */
/* Cache object: 7c7bde1066236f84a43a72e0dbf9df30 */