FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_periph.c
1 /*-
2 * Common functions for CAM "type" (peripheral) drivers.
3 *
4 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD: src/sys/cam/cam_periph.c,v 1.56.4.3 2005/03/12 10:01:39 delphij Exp $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/linker_set.h>
38 #include <sys/bio.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/buf.h>
42 #include <sys/proc.h>
43 #include <sys/devicestat.h>
44 #include <sys/bus.h>
45 #include <vm/vm.h>
46 #include <vm/vm_extern.h>
47
48 #include <cam/cam.h>
49 #include <cam/cam_ccb.h>
50 #include <cam/cam_xpt_periph.h>
51 #include <cam/cam_periph.h>
52 #include <cam/cam_debug.h>
53
54 #include <cam/scsi/scsi_all.h>
55 #include <cam/scsi/scsi_message.h>
56 #include <cam/scsi/scsi_pass.h>
57
58 static u_int camperiphnextunit(struct periph_driver *p_drv,
59 u_int newunit, int wired,
60 path_id_t pathid, target_id_t target,
61 lun_id_t lun);
62 static u_int camperiphunit(struct periph_driver *p_drv,
63 path_id_t pathid, target_id_t target,
64 lun_id_t lun);
65 static void camperiphdone(struct cam_periph *periph,
66 union ccb *done_ccb);
67 static void camperiphfree(struct cam_periph *periph);
68 static int camperiphscsistatuserror(union ccb *ccb,
69 cam_flags camflags,
70 u_int32_t sense_flags,
71 union ccb *save_ccb,
72 int *openings,
73 u_int32_t *relsim_flags,
74 u_int32_t *timeout);
75 static int camperiphscsisenseerror(union ccb *ccb,
76 cam_flags camflags,
77 u_int32_t sense_flags,
78 union ccb *save_ccb,
79 int *openings,
80 u_int32_t *relsim_flags,
81 u_int32_t *timeout);
82
83 static int nperiph_drivers;
84 struct periph_driver **periph_drivers;
85
86 void
87 periphdriver_register(void *data)
88 {
89 struct periph_driver **newdrivers, **old;
90 int ndrivers;
91
92 ndrivers = nperiph_drivers + 2;
93 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_TEMP, M_WAITOK);
94 if (periph_drivers)
95 bcopy(periph_drivers, newdrivers,
96 sizeof(*newdrivers) * nperiph_drivers);
97 newdrivers[nperiph_drivers] = (struct periph_driver *)data;
98 newdrivers[nperiph_drivers + 1] = NULL;
99 old = periph_drivers;
100 periph_drivers = newdrivers;
101 if (old)
102 free(old, M_TEMP);
103 nperiph_drivers++;
104 }
105
106 cam_status
107 cam_periph_alloc(periph_ctor_t *periph_ctor,
108 periph_oninv_t *periph_oninvalidate,
109 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
110 char *name, cam_periph_type type, struct cam_path *path,
111 ac_callback_t *ac_callback, ac_code code, void *arg)
112 {
113 struct periph_driver **p_drv;
114 struct cam_periph *periph;
115 struct cam_periph *cur_periph;
116 path_id_t path_id;
117 target_id_t target_id;
118 lun_id_t lun_id;
119 cam_status status;
120 u_int init_level;
121 int s;
122
123 init_level = 0;
124 /*
125 * Handle Hot-Plug scenarios. If there is already a peripheral
126 * of our type assigned to this path, we are likely waiting for
127 * final close on an old, invalidated, peripheral. If this is
128 * the case, queue up a deferred call to the peripheral's async
129 * handler. If it looks like a mistaken re-allocation, complain.
130 */
131 if ((periph = cam_periph_find(path, name)) != NULL) {
132
133 if ((periph->flags & CAM_PERIPH_INVALID) != 0
134 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
135 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
136 periph->deferred_callback = ac_callback;
137 periph->deferred_ac = code;
138 return (CAM_REQ_INPROG);
139 } else {
140 printf("cam_periph_alloc: attempt to re-allocate "
141 "valid device %s%d rejected\n",
142 periph->periph_name, periph->unit_number);
143 }
144 return (CAM_REQ_INVALID);
145 }
146
147 periph = (struct cam_periph *)malloc(sizeof(*periph), M_DEVBUF,
148 M_NOWAIT);
149
150 if (periph == NULL)
151 return (CAM_RESRC_UNAVAIL);
152
153 init_level++;
154
155 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
156 if (strcmp((*p_drv)->driver_name, name) == 0)
157 break;
158 }
159
160 path_id = xpt_path_path_id(path);
161 target_id = xpt_path_target_id(path);
162 lun_id = xpt_path_lun_id(path);
163 bzero(periph, sizeof(*periph));
164 cam_init_pinfo(&periph->pinfo);
165 periph->periph_start = periph_start;
166 periph->periph_dtor = periph_dtor;
167 periph->periph_oninval = periph_oninvalidate;
168 periph->type = type;
169 periph->periph_name = name;
170 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
171 periph->immediate_priority = CAM_PRIORITY_NONE;
172 periph->refcount = 0;
173 SLIST_INIT(&periph->ccb_list);
174 status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
175 if (status != CAM_REQ_CMP)
176 goto failure;
177
178 periph->path = path;
179 init_level++;
180
181 status = xpt_add_periph(periph);
182
183 if (status != CAM_REQ_CMP)
184 goto failure;
185
186 s = splsoftcam();
187 cur_periph = TAILQ_FIRST(&(*p_drv)->units);
188 while (cur_periph != NULL
189 && cur_periph->unit_number < periph->unit_number)
190 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
191
192 if (cur_periph != NULL)
193 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
194 else {
195 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
196 (*p_drv)->generation++;
197 }
198
199 splx(s);
200
201 init_level++;
202
203 status = periph_ctor(periph, arg);
204
205 if (status == CAM_REQ_CMP)
206 init_level++;
207
208 failure:
209 switch (init_level) {
210 case 4:
211 /* Initialized successfully */
212 break;
213 case 3:
214 s = splsoftcam();
215 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
216 splx(s);
217 xpt_remove_periph(periph);
218 /* FALLTHROUGH */
219 case 2:
220 xpt_free_path(periph->path);
221 /* FALLTHROUGH */
222 case 1:
223 free(periph, M_DEVBUF);
224 /* FALLTHROUGH */
225 case 0:
226 /* No cleanup to perform. */
227 break;
228 default:
229 panic("cam_periph_alloc: Unkown init level");
230 }
231 return(status);
232 }
233
234 /*
235 * Find a peripheral structure with the specified path, target, lun,
236 * and (optionally) type. If the name is NULL, this function will return
237 * the first peripheral driver that matches the specified path.
238 */
239 struct cam_periph *
240 cam_periph_find(struct cam_path *path, char *name)
241 {
242 struct periph_driver **p_drv;
243 struct cam_periph *periph;
244 int s;
245
246 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
247
248 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
249 continue;
250
251 s = splsoftcam();
252 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
253 if (xpt_path_comp(periph->path, path) == 0) {
254 splx(s);
255 return(periph);
256 }
257 }
258 splx(s);
259 if (name != NULL)
260 return(NULL);
261 }
262 return(NULL);
263 }
264
265 cam_status
266 cam_periph_acquire(struct cam_periph *periph)
267 {
268 int s;
269
270 if (periph == NULL)
271 return(CAM_REQ_CMP_ERR);
272
273 s = splsoftcam();
274 periph->refcount++;
275 splx(s);
276
277 return(CAM_REQ_CMP);
278 }
279
280 void
281 cam_periph_release(struct cam_periph *periph)
282 {
283 int s;
284
285 if (periph == NULL)
286 return;
287
288 s = splsoftcam();
289 if ((--periph->refcount == 0)
290 && (periph->flags & CAM_PERIPH_INVALID)) {
291 camperiphfree(periph);
292 }
293 splx(s);
294
295 }
296
297 /*
298 * Look for the next unit number that is not currently in use for this
299 * peripheral type starting at "newunit". Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
301 * is a potential wired device. Only assume that the device is "wired" the
302 * first time through the loop since after that we'll be looking at unit
303 * numbers that did not match a wiring entry.
304 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name;
	int s;
	int i, val, dunit, r;
	const char *dname, *strval;

	s = splsoftcam();
	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		/* Is newunit already taken by an existing peripheral? */
		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			/*
			 * Unit in use.  If we thought this was a wired
			 * device, warn once and fall back to treating it
			 * as unwired for the rest of the search.
			 */
			if (wired != 0) {
				xpt_print_path(periph->path);
				printf("Duplicate Wired Device entry!\n");
				xpt_print_path(periph->path);
				printf("Second device (%s device at scbus%d "
				       "target %d lun %d) will not be wired\n",
				       periph_name, pathid, target, lun);
				wired = 0;
			}
			continue;
		}
		/* A free unit for a wired device is taken immediately. */
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at",&strval)||
			     strcmp(strval, "scbus") == 0))
				continue;
			/* newunit is reserved for a wiring hint; retry. */
			if (newunit == dunit)
				break;
		}
		/* r != 0: no hint reserves newunit, so it is usable. */
		if (r != 0)
			break;
	}
	splx(s);
	return (newunit);
}
364
/*
 * Choose a unit number for a new peripheral instance.  First scan the
 * kernel hints for an entry wired to this scbus/target/lun; if one
 * matches, start the search from its unit number, otherwise from 0.
 * camperiphnextunit() then finds the first number that is neither in
 * use nor reserved by another wiring entry.
 */
static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int unit;
	int wired, i, val, dunit;
	const char *dname, *strval;
	char pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		/*
		 * Each clause present in the hint must match our path
		 * exactly; a mismatch rejects the hint, a match makes
		 * the entry count as "wired".
		 */
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}
412
/*
 * Mark a peripheral invalid; if nobody holds a reference it is freed
 * immediately, otherwise the final cam_periph_release() will free it.
 */
void
cam_periph_invalidate(struct cam_periph *periph)
{
	int s;

	s = splsoftcam();
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.  The oninvalidate() routine is always called at
	 * splsoftcam().
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	/* Any pending hot-plug deferral no longer applies. */
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		/* Should never happen: indicates unbalanced releases. */
		printf("cam_invalidate_periph: refcount < 0!!\n");
	splx(s);
}
437
/*
 * Final teardown of an invalidated peripheral whose reference count
 * has reached zero: run the driver destructor, unlink the unit from
 * its driver list, deliver any deferred async callback queued by
 * cam_periph_alloc(), and release the path and the structure itself.
 */
static void
camperiphfree(struct cam_periph *periph)
{
	int s;
	struct periph_driver **p_drv;

	/* Find the driver table entry that owns this unit list. */
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existant periph\n");
		return;
	}

	/* Let the driver release its private state before unlinking. */
	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);

	s = splsoftcam();
	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	splx(s);

	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		/*
		 * A new device arrived on this path while the old
		 * peripheral was dying; replay the deferred async
		 * event now, building whatever argument that event
		 * type expects.
		 */
		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_DEVBUF);
}
490
491 /*
492 * Wait interruptibly for an exclusive lock.
493 */
494 int
495 cam_periph_lock(struct cam_periph *periph, int priority)
496 {
497 int error;
498
499 /*
500 * Increment the reference count on the peripheral
501 * while we wait for our lock attempt to succeed
502 * to ensure the peripheral doesn't disappear out
503 * from under us while we sleep.
504 */
505 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
506 return(ENXIO);
507
508 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
509 periph->flags |= CAM_PERIPH_LOCK_WANTED;
510 if ((error = tsleep(periph, priority, "caplck", 0)) != 0) {
511 cam_periph_release(periph);
512 return error;
513 }
514 }
515
516 periph->flags |= CAM_PERIPH_LOCKED;
517 return 0;
518 }
519
520 /*
521 * Unlock and wake up any waiters.
522 */
523 void
524 cam_periph_unlock(struct cam_periph *periph)
525 {
526 periph->flags &= ~CAM_PERIPH_LOCKED;
527 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
528 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
529 wakeup(periph);
530 }
531
532 cam_periph_release(periph);
533 }
534
535 /*
536 * Map user virtual pointers into kernel virtual address space, so we can
537 * access the memory. This won't work on physical pointers, for now it's
538 * up to the caller to check for that. (XXX KDM -- should we do that here
539 * instead?) This also only works for up to MAXPHYS memory. Since we use
540 * buffers to map stuff in and out, we're limited to the buffer size.
541 */
/*
 * Validate and map each user buffer referenced by the CCB into kernel
 * virtual address space using pbufs.  On success, the CCB's data
 * pointers are rewritten to the mapped kernel addresses and
 * mapinfo->num_bufs_used records how many buffers were mapped; the
 * caller must later undo this with cam_periph_unmapmem().  Returns
 * EINVAL for unsupported CCB types or a missing match buffer, E2BIG
 * for transfers that exceed DFLTPHYS, and EACCES when vmapbuf() fails.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	/* Work out which user pointers this CCB type carries. */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		/* Pattern buffer is optional; match buffer is mandatory. */
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		/* Nothing to map when the command moves no data. */
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than DFLTPHYS(%d)\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       DFLTPHYS);
			return(E2BIG);
		}

		/* Translate the CAM transfer direction into a bio cmd. */
		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < DFLTPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i]) < 0) {
			/* Unwind every mapping completed so far. */
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			relpbuf(mapinfo->bp[i], NULL);
			PRELE(curproc);
			return(EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	return(0);
}
675
676 /*
677 * Unmap memory segments mapped into kernel virtual address space by
678 * cam_periph_mapmem().
679 */
680 void
681 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
682 {
683 int numbufs, i;
684 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
685
686 if (mapinfo->num_bufs_used <= 0) {
687 /* allow ourselves to be swapped once again */
688 PRELE(curproc);
689 return;
690 }
691
692 switch (ccb->ccb_h.func_code) {
693 case XPT_DEV_MATCH:
694 numbufs = min(mapinfo->num_bufs_used, 2);
695
696 if (numbufs == 1) {
697 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
698 } else {
699 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
700 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
701 }
702 break;
703 case XPT_SCSI_IO:
704 case XPT_CONT_TARGET_IO:
705 data_ptrs[0] = &ccb->csio.data_ptr;
706 numbufs = min(mapinfo->num_bufs_used, 1);
707 break;
708 default:
709 /* allow ourselves to be swapped once again */
710 PRELE(curproc);
711 return;
712 break; /* NOTREACHED */
713 }
714
715 for (i = 0; i < numbufs; i++) {
716 /* Set the user's pointer back to the original value */
717 *data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;
718
719 /* unmap the buffer */
720 vunmapbuf(mapinfo->bp[i]);
721
722 /* release the buffer */
723 relpbuf(mapinfo->bp[i], NULL);
724 }
725
726 /* allow ourselves to be swapped once again */
727 PRELE(curproc);
728 }
729
/*
 * Fetch a CCB for this peripheral at the requested priority, sleeping
 * until xpt_schedule()/the driver start routine makes one available.
 */
union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;
	int s;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	s = splsoftcam();

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		/* Track the best (numerically lowest) priority wanted. */
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		/* xpt_schedule() may have delivered a CCB synchronously. */
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		tsleep(&periph->ccb_list, PRIBIO, "cgticb", 0);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	splx(s);
	return ((union ccb *)ccb_h);
}
755
756 void
757 cam_periph_ccbwait(union ccb *ccb)
758 {
759 int s;
760
761 s = splsoftcam();
762 if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
763 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
764 tsleep(&ccb->ccb_h.cbfcnp, PRIBIO, "cbwait", 0);
765
766 splx(s);
767 }
768
/*
 * Generic ioctl handler shared by peripheral drivers.  Currently only
 * CAMGETPASSTHRU is implemented: walk the device list attached to this
 * path until a "pass" peripheral is found and copy the resulting CCB
 * out to the caller.  error_routine is accepted for interface
 * symmetry but is not used here.  Returns ENOTTY for unknown cmds.
 */
int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			/* End of the list with no pass device: report none. */
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
830
831 int
832 cam_periph_runccb(union ccb *ccb,
833 int (*error_routine)(union ccb *ccb,
834 cam_flags camflags,
835 u_int32_t sense_flags),
836 cam_flags camflags, u_int32_t sense_flags,
837 struct devstat *ds)
838 {
839 int error;
840
841 error = 0;
842
843 /*
844 * If the user has supplied a stats structure, and if we understand
845 * this particular type of ccb, record the transaction start.
846 */
847 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
848 devstat_start_transaction(ds, NULL);
849
850 xpt_action(ccb);
851
852 do {
853 cam_periph_ccbwait(ccb);
854 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
855 error = 0;
856 else if (error_routine != NULL)
857 error = (*error_routine)(ccb, camflags, sense_flags);
858 else
859 error = 0;
860
861 } while (error == ERESTART);
862
863 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
864 cam_release_devq(ccb->ccb_h.path,
865 /* relsim_flags */0,
866 /* openings */0,
867 /* timeout */0,
868 /* getcount_only */ FALSE);
869
870 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
871 devstat_end_transaction(ds,
872 ccb->csio.dxfer_len,
873 ccb->csio.tag_action & 0xf,
874 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
875 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
876 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
877 DEVSTAT_WRITE :
878 DEVSTAT_READ, NULL, NULL);
879
880 return(error);
881 }
882
883 void
884 cam_freeze_devq(struct cam_path *path)
885 {
886 struct ccb_hdr ccb_h;
887
888 xpt_setup_ccb(&ccb_h, path, /*priority*/1);
889 ccb_h.func_code = XPT_NOOP;
890 ccb_h.flags = CAM_DEV_QFREEZE;
891 xpt_action((union ccb *)&ccb_h);
892 }
893
894 u_int32_t
895 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
896 u_int32_t openings, u_int32_t timeout,
897 int getcount_only)
898 {
899 struct ccb_relsim crs;
900
901 xpt_setup_ccb(&crs.ccb_h, path,
902 /*priority*/1);
903 crs.ccb_h.func_code = XPT_REL_SIMQ;
904 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
905 crs.release_flags = relsim_flags;
906 crs.openings = openings;
907 crs.release_timeout = timeout;
908 xpt_action((union ccb *)&crs);
909 return (crs.qfrozen_cnt);
910 }
911
#define saved_ccb_ptr ppriv_ptr0
/*
 * Completion handler for the recovery CCBs issued by the generic
 * error-handling code.  The original (client) CCB is stashed in
 * saved_ccb_ptr; when recovery finishes, that CCB is copied back over
 * done_ccb and either re-dispatched with xpt_action() or, if sense
 * data was recovered, handed back to the owner's completion routine
 * so error recovery can run again.
 */
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	int frozen;
	int sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t relsim_flags, timeout;
	u_int32_t qfrozen_cnt;
	int xpt_done_ccb;

	xpt_done_ccb = FALSE;
	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	/*
	 * Unfreeze the queue once if it is already frozen..
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					       /*relsim_flags*/0,
					       /*openings*/0,
					       /*timeout*/0,
					       /*getcount_only*/0);
	}

	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 *
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
			if (scsi_cmd->opcode == REQUEST_SENSE) {
				u_int sense_key;

				sense_key = saved_ccb->csio.sense_data.flags;
				sense_key &= SSD_KEY;
				if (sense_key != SSD_KEY_NO_SENSE) {
					saved_ccb->ccb_h.status |=
					    CAM_AUTOSNS_VALID;
#if 0
					xpt_print_path(saved_ccb->ccb_h.path);
					printf("Recovered Sense\n");
					scsi_sense_print(&saved_ccb->csio);
					cam_error_print(saved_ccb, CAM_ESF_ALL,
							CAM_EPF_ALL);
#endif
					/* Owner must re-run error recovery. */
					xpt_done_ccb = TRUE;
				}
			}
		}
		/* Restore the original CCB over the recovery CCB. */
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		if (xpt_done_ccb == FALSE)
			xpt_action(done_ccb);

		break;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int    error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			/* NOTE: local 'sense' shadows the outer int flag. */
			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
				      /*priority*/ 1);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);

			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */

			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			     (asc == 0x24) && (ascq == 0x00) &&
			     (done_ccb->ccb_h.retry_count > 0)) {

				/* Retry without the load/eject bit. */
				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
					RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);

		}
		break;
	default:
		/* Any other status: give the original CCB back as-is. */
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 * so that the retry count is only decremented on an
	 * actual retry.  Remember that the original ccb had its
	 * retry count dropped before entering recovery, so
	 * doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	/* Re-apply the queue freeze with the chosen relsim behavior. */
	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				       /*relsim_flags*/relsim_flags,
				       /*openings*/0,
				       /*timeout*/timeout,
				       /*getcount_only*/0);
	if (xpt_done_ccb == TRUE)
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}
1126
1127 /*
1128 * Generic Async Event handler. Peripheral drivers usually
1129 * filter out the events that require personal attention,
1130 * and leave the rest to this function.
1131 */
1132 void
1133 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1134 struct cam_path *path, void *arg)
1135 {
1136 switch (code) {
1137 case AC_LOST_DEVICE:
1138 cam_periph_invalidate(periph);
1139 break;
1140 case AC_SENT_BDR:
1141 case AC_BUS_RESET:
1142 {
1143 cam_periph_bus_settle(periph, scsi_delay);
1144 break;
1145 }
1146 default:
1147 break;
1148 }
1149 }
1150
1151 void
1152 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1153 {
1154 struct ccb_getdevstats cgds;
1155
1156 xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
1157 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1158 xpt_action((union ccb *)&cgds);
1159 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1160 }
1161
1162 void
1163 cam_periph_freeze_after_event(struct cam_periph *periph,
1164 struct timeval* event_time, u_int duration_ms)
1165 {
1166 struct timeval delta;
1167 struct timeval duration_tv;
1168 int s;
1169
1170 s = splclock();
1171 microtime(&delta);
1172 splx(s);
1173 timevalsub(&delta, event_time);
1174 duration_tv.tv_sec = duration_ms / 1000;
1175 duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1176 if (timevalcmp(&delta, &duration_tv, <)) {
1177 timevalsub(&duration_tv, &delta);
1178
1179 duration_ms = duration_tv.tv_sec * 1000;
1180 duration_ms += duration_tv.tv_usec / 1000;
1181 cam_freeze_devq(periph->path);
1182 cam_release_devq(periph->path,
1183 RELSIM_RELEASE_AFTER_TIMEOUT,
1184 /*reduction*/0,
1185 /*timeout*/duration_ms,
1186 /*getcount_only*/0);
1187 }
1188
1189 }
1190
/*
 * Translate the SCSI status byte of a completed CCB into an errno
 * value, possibly arranging retries or device-queue adjustments.
 *
 * ccb		- completed CCB whose csio.scsi_status is examined.
 * camflags	- recovery flags, forwarded to sense-error handling.
 * sense_flags	- sense-action filter flags, forwarded as well.
 * save_ccb	- scratch CCB for multi-step sense recovery (may be NULL;
 *		  only used by camperiphscsisenseerror()).
 * openings	- out: new tag-opening count (set in QUEUE FULL handling).
 * relsim_flags	- out: RELSIM_* flags for the caller's later devq release.
 * timeout	- out: ms value used with RELSIM_RELEASE_AFTER_TIMEOUT.
 *
 * Returns 0 for non-error statuses, ERESTART when the command should
 * be requeued, or an errno (EIO) when the command must be failed.
 */
static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags, union ccb *save_ccb,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		/* None of these statuses indicate an error. */
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		/* Let the sense data (or autosense policy) decide. */
		error = camperiphscsisenseerror(ccb,
						camflags,
						sense_flags,
						save_ccb,
						openings,
						relsim_flags,
						timeout);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print_path(ccb->ccb_h.path);
				printf("Queue Full\n");
			}
			break;
		}
		/* No other commands outstanding: handle as BUSY below. */
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Device Busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			/* Retries exhausted; fail the command. */
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print_path(ccb->ccb_h.path);
		printf("Reservation Conflict\n");
		error = EIO;
		break;
	default:
		xpt_print_path(ccb->ccb_h.path);
		printf("SCSI Status 0x%x\n", ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}
1308
/*
 * Decode the sense information for a command that returned CHECK
 * CONDITION or COMMAND TERMINATED, pick a recovery action, and (for
 * multi-step recoveries) rewrite the failed CCB into a recovery
 * command whose completion is handled by camperiphdone().
 *
 * ccb		- the failed CCB; may be overwritten in place with a
 *		  recovery command (START UNIT, TUR, REQUEST SENSE).
 * camflags	- recovery flags (currently unused here directly).
 * sense_flags	- filter flags passed to scsi_error_action().
 * save_ccb	- caller-supplied CCB that receives a copy of the
 *		  original command while recovery runs; required for any
 *		  action >= SS_START.
 * openings	- out parameter (not written by this function).
 * relsim_flags	- out: RELSIM_* flags for the caller's devq release.
 * timeout	- out: ms delay used with RELSIM_RELEASE_AFTER_TIMEOUT.
 *
 * Returns 0, ERESTART (command requeued / recovery started), or an
 * errno with which to fail the command.
 */
static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags, union ccb *save_ccb,
			int *openings, u_int32_t *relsim_flags,
			u_int32_t *timeout)
{
	struct cam_periph *periph;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {

		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;
		const char *action_string;
		union ccb* print_ccb;

		/* A description of the error recovery action performed */
		action_string = NULL;

		/*
		 * The location of the original ccb
		 * for sense printing purposes.
		 */
		print_ccb = ccb;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		/*
		 * Map the failure to an action: use the sense data when
		 * autosense supplied it, fetch sense ourselves when
		 * autosense is enabled but missing, otherwise retry with
		 * EIO as the fallback error.
		 */
		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
			else {
				action_string = "Retries Exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			if (save_ccb == NULL) {
				action_string = "No recovery CCB supplied";
				goto sense_error_done;
			}
			bcopy(ccb, save_ccb, sizeof(*save_ccb));
			print_ccb = save_ccb;
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			action_string = "No Recovery Action Needed";
			error = 0;
			break;
		case SS_RETRY:
			action_string = "Retrying Command (per Sense Data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			action_string = "Attempting to Start Unit";

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			/* Rewrite the failed CCB into a START STOP UNIT. */
			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				action_string = "Polling device for readiness";
				retries = 120;
			} else {
				action_string = "Testing device for readiness";
				retries = 1;
			}
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphdone,
					   &save_ccb->csio.sense_data,
					   sizeof(save_ccb->csio.sense_data),
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority to 0 so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority = 0;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = save_ccb;
			error = ERESTART;
		}

sense_error_done:
		/*
		 * Print the sense and the chosen action when the action
		 * requests it and valid autosense data is available.
		 */
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
			cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			xpt_print_path(ccb->ccb_h.path);
			if (bootverbose)
				scsi_sense_print(&print_ccb->csio);
			printf("%s\n", action_string);
		}
	}
	return (error);
}
1518
1519 /*
1520 * Generic error handler. Peripheral drivers usually filter
1521 * out the errors that they handle in a unique mannor, then
1522 * call this function.
1523 */
1524 int
1525 cam_periph_error(union ccb *ccb, cam_flags camflags,
1526 u_int32_t sense_flags, union ccb *save_ccb)
1527 {
1528 const char *action_string;
1529 cam_status status;
1530 int frozen;
1531 int error, printed = 0;
1532 int openings;
1533 u_int32_t relsim_flags;
1534 u_int32_t timeout;
1535
1536 action_string = NULL;
1537 status = ccb->ccb_h.status;
1538 frozen = (status & CAM_DEV_QFRZN) != 0;
1539 status &= CAM_STATUS_MASK;
1540 openings = relsim_flags = 0;
1541
1542 switch (status) {
1543 case CAM_REQ_CMP:
1544 error = 0;
1545 break;
1546 case CAM_SCSI_STATUS_ERROR:
1547 error = camperiphscsistatuserror(ccb,
1548 camflags,
1549 sense_flags,
1550 save_ccb,
1551 &openings,
1552 &relsim_flags,
1553 &timeout);
1554 break;
1555 case CAM_AUTOSENSE_FAIL:
1556 xpt_print_path(ccb->ccb_h.path);
1557 printf("AutoSense Failed\n");
1558 error = EIO; /* we have to kill the command */
1559 break;
1560 case CAM_REQ_CMP_ERR:
1561 if (bootverbose && printed == 0) {
1562 xpt_print_path(ccb->ccb_h.path);
1563 printf("Request completed with CAM_REQ_CMP_ERR\n");
1564 printed++;
1565 }
1566 /* FALLTHROUGH */
1567 case CAM_CMD_TIMEOUT:
1568 if (bootverbose && printed == 0) {
1569 xpt_print_path(ccb->ccb_h.path);
1570 printf("Command timed out\n");
1571 printed++;
1572 }
1573 /* FALLTHROUGH */
1574 case CAM_UNEXP_BUSFREE:
1575 if (bootverbose && printed == 0) {
1576 xpt_print_path(ccb->ccb_h.path);
1577 printf("Unexpected Bus Free\n");
1578 printed++;
1579 }
1580 /* FALLTHROUGH */
1581 case CAM_UNCOR_PARITY:
1582 if (bootverbose && printed == 0) {
1583 xpt_print_path(ccb->ccb_h.path);
1584 printf("Uncorrected Parity Error\n");
1585 printed++;
1586 }
1587 /* FALLTHROUGH */
1588 case CAM_DATA_RUN_ERR:
1589 if (bootverbose && printed == 0) {
1590 xpt_print_path(ccb->ccb_h.path);
1591 printf("Data Overrun\n");
1592 printed++;
1593 }
1594 error = EIO; /* we have to kill the command */
1595 /* decrement the number of retries */
1596 if (ccb->ccb_h.retry_count > 0) {
1597 ccb->ccb_h.retry_count--;
1598 error = ERESTART;
1599 } else {
1600 action_string = "Retries Exausted";
1601 error = EIO;
1602 }
1603 break;
1604 case CAM_UA_ABORT:
1605 case CAM_UA_TERMIO:
1606 case CAM_MSG_REJECT_REC:
1607 /* XXX Don't know that these are correct */
1608 error = EIO;
1609 break;
1610 case CAM_SEL_TIMEOUT:
1611 {
1612 struct cam_path *newpath;
1613
1614 if ((camflags & CAM_RETRY_SELTO) != 0) {
1615 if (ccb->ccb_h.retry_count > 0) {
1616
1617 ccb->ccb_h.retry_count--;
1618 error = ERESTART;
1619 if (bootverbose && printed == 0) {
1620 xpt_print_path(ccb->ccb_h.path);
1621 printf("Selection Timeout\n");
1622 printed++;
1623 }
1624
1625 /*
1626 * Wait a second to give the device
1627 * time to recover before we try again.
1628 */
1629 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1630 timeout = 1000;
1631 break;
1632 }
1633 }
1634 error = ENXIO;
1635 /* Should we do more if we can't create the path?? */
1636 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
1637 xpt_path_path_id(ccb->ccb_h.path),
1638 xpt_path_target_id(ccb->ccb_h.path),
1639 CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1640 break;
1641
1642 /*
1643 * Let peripheral drivers know that this device has gone
1644 * away.
1645 */
1646 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1647 xpt_free_path(newpath);
1648 break;
1649 }
1650 case CAM_REQ_INVALID:
1651 case CAM_PATH_INVALID:
1652 case CAM_DEV_NOT_THERE:
1653 case CAM_NO_HBA:
1654 case CAM_PROVIDE_FAIL:
1655 case CAM_REQ_TOO_BIG:
1656 error = EINVAL;
1657 break;
1658 case CAM_SCSI_BUS_RESET:
1659 case CAM_BDR_SENT:
1660 /*
1661 * Commands that repeatedly timeout and cause these
1662 * kinds of error recovery actions, should return
1663 * CAM_CMD_TIMEOUT, which allows us to safely assume
1664 * that this command was an innocent bystander to
1665 * these events and should be unconditionally
1666 * retried.
1667 */
1668 if (bootverbose && printed == 0) {
1669 xpt_print_path(ccb->ccb_h.path);
1670 if (status == CAM_BDR_SENT)
1671 printf("Bus Device Reset sent\n");
1672 else
1673 printf("Bus Reset issued\n");
1674 printed++;
1675 }
1676 /* FALLTHROUGH */
1677 case CAM_REQUEUE_REQ:
1678 /* Unconditional requeue */
1679 error = ERESTART;
1680 if (bootverbose && printed == 0) {
1681 xpt_print_path(ccb->ccb_h.path);
1682 printf("Request Requeued\n");
1683 printed++;
1684 }
1685 break;
1686 case CAM_RESRC_UNAVAIL:
1687 case CAM_BUSY:
1688 /* timeout??? */
1689 default:
1690 /* decrement the number of retries */
1691 if (ccb->ccb_h.retry_count > 0) {
1692 ccb->ccb_h.retry_count--;
1693 error = ERESTART;
1694 if (bootverbose && printed == 0) {
1695 xpt_print_path(ccb->ccb_h.path);
1696 printf("CAM Status 0x%x\n", status);
1697 printed++;
1698 }
1699 } else {
1700 error = EIO;
1701 action_string = "Retries Exhausted";
1702 }
1703 break;
1704 }
1705
1706 /* Attempt a retry */
1707 if (error == ERESTART || error == 0) {
1708 if (frozen != 0)
1709 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1710
1711 if (error == ERESTART) {
1712 action_string = "Retrying Command";
1713 xpt_action(ccb);
1714 }
1715
1716 if (frozen != 0)
1717 cam_release_devq(ccb->ccb_h.path,
1718 relsim_flags,
1719 openings,
1720 timeout,
1721 /*getcount_only*/0);
1722 }
1723
1724 /*
1725 * If we have and error and are booting verbosely, whine
1726 * *unless* this was a non-retryable selection timeout.
1727 */
1728 if (error != 0 && bootverbose &&
1729 !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {
1730
1731
1732 if (action_string == NULL)
1733 action_string = "Unretryable Error";
1734 if (error != ERESTART) {
1735 xpt_print_path(ccb->ccb_h.path);
1736 printf("error %d\n", error);
1737 }
1738 xpt_print_path(ccb->ccb_h.path);
1739 printf("%s\n", action_string);
1740 }
1741
1742 return (error);
1743 }
Cache object: 7fa9a10530f990451667a66ed363cbcf
|