FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_periph.c
1 /*
2 * Common functions for CAM "type" (peripheral) drivers.
3 *
4 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD$
30 */
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/types.h>
35 #include <sys/malloc.h>
36 #include <sys/linker_set.h>
37 #include <sys/buf.h>
38 #include <sys/proc.h>
39 #include <sys/devicestat.h>
40 #include <vm/vm.h>
41 #include <vm/vm_extern.h>
42
43 #include <cam/cam.h>
44 #include <cam/cam_conf.h>
45 #include <cam/cam_ccb.h>
46 #include <cam/cam_xpt_periph.h>
47 #include <cam/cam_periph.h>
48 #include <cam/cam_debug.h>
49
50 #include <cam/scsi/scsi_all.h>
51 #include <cam/scsi/scsi_message.h>
52 #include <cam/scsi/scsi_da.h>
53 #include <cam/scsi/scsi_pass.h>
54
55 static u_int camperiphnextunit(struct periph_driver *p_drv,
56 u_int newunit, int wired,
57 path_id_t pathid, target_id_t target,
58 lun_id_t lun);
59 static u_int camperiphunit(struct periph_driver *p_drv,
60 path_id_t pathid, target_id_t target,
61 lun_id_t lun);
62 static void camperiphdone(struct cam_periph *periph,
63 union ccb *done_ccb);
64 static void camperiphfree(struct cam_periph *periph);
65
66 cam_status
67 cam_periph_alloc(periph_ctor_t *periph_ctor,
68 periph_oninv_t *periph_oninvalidate,
69 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
70 char *name, cam_periph_type type, struct cam_path *path,
71 ac_callback_t *ac_callback, ac_code code, void *arg)
72 {
73 struct periph_driver **p_drv;
74 struct cam_periph *periph;
75 struct cam_periph *cur_periph;
76 path_id_t path_id;
77 target_id_t target_id;
78 lun_id_t lun_id;
79 cam_status status;
80 u_int init_level;
81 int s;
82
83 init_level = 0;
84 /*
85 * Handle Hot-Plug scenarios. If there is already a peripheral
86 * of our type assigned to this path, we are likely waiting for
87 * final close on an old, invalidated, peripheral. If this is
88 * the case, queue up a deferred call to the peripheral's async
89 * handler. If it looks like a mistaken re-alloation, complain.
90 */
91 if ((periph = cam_periph_find(path, name)) != NULL) {
92
93 if ((periph->flags & CAM_PERIPH_INVALID) != 0
94 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
95 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
96 periph->deferred_callback = ac_callback;
97 periph->deferred_ac = code;
98 return (CAM_REQ_INPROG);
99 } else {
100 printf("cam_periph_alloc: attempt to re-allocate "
101 "valid device %s%d rejected\n",
102 periph->periph_name, periph->unit_number);
103 }
104 return (CAM_REQ_INVALID);
105 }
106
107 periph = (struct cam_periph *)malloc(sizeof(*periph), M_DEVBUF,
108 M_NOWAIT);
109
110 if (periph == NULL)
111 return (CAM_RESRC_UNAVAIL);
112
113 init_level++;
114
115 for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
116 *p_drv != NULL; p_drv++) {
117 if (strcmp((*p_drv)->driver_name, name) == 0)
118 break;
119 }
120
121 path_id = xpt_path_path_id(path);
122 target_id = xpt_path_target_id(path);
123 lun_id = xpt_path_lun_id(path);
124 bzero(periph, sizeof(*periph));
125 cam_init_pinfo(&periph->pinfo);
126 periph->periph_start = periph_start;
127 periph->periph_dtor = periph_dtor;
128 periph->periph_oninval = periph_oninvalidate;
129 periph->type = type;
130 periph->periph_name = name;
131 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
132 periph->immediate_priority = CAM_PRIORITY_NONE;
133 periph->refcount = 0;
134 SLIST_INIT(&periph->ccb_list);
135 status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
136 if (status != CAM_REQ_CMP)
137 goto failure;
138
139 periph->path = path;
140 init_level++;
141
142 status = xpt_add_periph(periph);
143
144 if (status != CAM_REQ_CMP)
145 goto failure;
146
147 s = splsoftcam();
148 cur_periph = TAILQ_FIRST(&(*p_drv)->units);
149 while (cur_periph != NULL
150 && cur_periph->unit_number < periph->unit_number)
151 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
152
153 if (cur_periph != NULL)
154 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
155 else {
156 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
157 (*p_drv)->generation++;
158 }
159
160 splx(s);
161
162 init_level++;
163
164 status = periph_ctor(periph, arg);
165
166 if (status == CAM_REQ_CMP)
167 init_level++;
168
169 failure:
170 switch (init_level) {
171 case 4:
172 /* Initialized successfully */
173 break;
174 case 3:
175 s = splsoftcam();
176 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
177 splx(s);
178 xpt_remove_periph(periph);
179 case 2:
180 xpt_free_path(periph->path);
181 case 1:
182 free(periph, M_DEVBUF);
183 case 0:
184 /* No cleanup to perform. */
185 break;
186 default:
187 panic("cam_periph_alloc: Unkown init level");
188 }
189 return(status);
190 }
191
192 /*
193 * Find a peripheral structure with the specified path, target, lun,
194 * and (optionally) type. If the name is NULL, this function will return
195 * the first peripheral driver that matches the specified path.
196 */
197 struct cam_periph *
198 cam_periph_find(struct cam_path *path, char *name)
199 {
200 struct periph_driver **p_drv;
201 struct cam_periph *periph;
202 int s;
203
204 for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
205 *p_drv != NULL; p_drv++) {
206
207 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
208 continue;
209
210 s = splsoftcam();
211 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
212 periph = TAILQ_NEXT(periph, unit_links)) {
213 if (xpt_path_comp(periph->path, path) == 0) {
214 splx(s);
215 return(periph);
216 }
217 }
218 splx(s);
219 if (name != NULL)
220 return(NULL);
221 }
222 return(NULL);
223 }
224
/*
 * Take a reference on a peripheral instance.
 *
 * Returns CAM_REQ_CMP_ERR when periph is NULL, CAM_REQ_CMP otherwise.
 * The increment is done at splsoftcam() so it cannot race with a
 * release performed from CAM's software-interrupt level.
 */
cam_status
cam_periph_acquire(struct cam_periph *periph)
{
	int s;

	if (periph == NULL)
		return(CAM_REQ_CMP_ERR);

	s = splsoftcam();
	periph->refcount++;
	splx(s);

	return(CAM_REQ_CMP);
}
239
/*
 * Drop a reference taken with cam_periph_acquire().
 *
 * If this was the last reference and the peripheral has already been
 * invalidated, final teardown (camperiphfree) happens here.  A NULL
 * periph is tolerated as a no-op.
 */
void
cam_periph_release(struct cam_periph *periph)
{
	int s;

	if (periph == NULL)
		return;

	s = splsoftcam();
	if ((--periph->refcount == 0)
	 && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
	splx(s);

}
256
/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	struct cam_periph_config *periph_conf;
	char *periph_name;
	int s;

	/* The unit list is examined at splsoftcam() to keep it stable. */
	s = splsoftcam();
	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		/* Is newunit already taken by an existing instance? */
		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				/*
				 * Two wiring entries claimed the same
				 * unit; demote this device to unwired
				 * and keep searching.
				 */
				xpt_print_path(periph->path);
				printf("Duplicate Wired Device entry!\n");
				xpt_print_path(periph->path);
				printf("Second device (%s device at scbus%d "
				       "target %d lun %d) will not be wired\n",
				       periph_name, pathid, target, lun);
				wired = 0;
			}
			continue;
		}

		/*
		 * Unit is free; make sure no config wiring entry reserves
		 * it for some other location (skip this check entirely
		 * when this device is itself the wired one).
		 */
		for (periph_conf = cam_pinit;
		     wired == 0 && periph_conf->periph_name != NULL;
		     periph_conf++) {

			/*
			 * Don't match entries like "da 4" as a wired down
			 * device, but do match entries like "da 4 target 5"
			 * or even "da 4 scbus 1".
			 */
			if (IS_SPECIFIED(periph_conf->periph_unit)
			 && (!strcmp(periph_name, periph_conf->periph_name))
			 && (IS_SPECIFIED(periph_conf->target)
			  || IS_SPECIFIED(periph_conf->pathid))
			 && (newunit == periph_conf->periph_unit))
				break;
		}

		/* Neither in use nor reserved: this is our unit. */
		if (wired != 0 || periph_conf->periph_name == NULL)
			break;
	}
	splx(s);
	return (newunit);
}
319
/*
 * Choose a unit number for a new peripheral instance at the given
 * bus/target/lun.  If a config wiring entry (e.g. "da 4 target 5")
 * matches this location, its unit number becomes the starting point and
 * the device is considered wired; otherwise numbering starts at 0.
 * camperiphnextunit() then locates the first number actually available.
 */
static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	struct cam_periph_config *periph_conf;
	u_int unit;
	int hit;

	unit = 0;
	hit = 0;

	/*
	 * "hit" counts how many of the entry's specified qualifiers
	 * (pathid/target/lun) matched; any mismatch skips the entry via
	 * continue, so a non-zero hit at the bottom means a full match.
	 */
	for (periph_conf = cam_pinit;
	     periph_conf->periph_name != NULL;
	     periph_conf++, hit = 0) {

		if (!strcmp(p_drv->driver_name, periph_conf->periph_name)
		 && IS_SPECIFIED(periph_conf->periph_unit)) {

			if (IS_SPECIFIED(periph_conf->pathid)) {

				if (pathid != periph_conf->pathid)
					continue;
				hit++;
			}

			if (IS_SPECIFIED(periph_conf->target)) {

				if (target != periph_conf->target)
					continue;
				hit++;
			}

			if (IS_SPECIFIED(periph_conf->lun)) {

				if (lun != periph_conf->lun)
					continue;
				hit++;
			}

			if (hit != 0) {
				unit = periph_conf->periph_unit;
				break;
			}
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the periph_conf.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
				 target, lun);

	return (unit);
}
377
378 void
379 cam_periph_invalidate(struct cam_periph *periph)
380 {
381 int s;
382
383 s = splsoftcam();
384 /*
385 * We only call this routine the first time a peripheral is
386 * invalidated. The oninvalidate() routine is always called at
387 * splsoftcam().
388 */
389 if (((periph->flags & CAM_PERIPH_INVALID) == 0)
390 && (periph->periph_oninval != NULL))
391 periph->periph_oninval(periph);
392
393 periph->flags |= CAM_PERIPH_INVALID;
394 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
395
396 if (periph->refcount == 0)
397 camperiphfree(periph);
398 else if (periph->refcount < 0)
399 printf("cam_invalidate_periph: refcount < 0!!\n");
400 splx(s);
401 }
402
403 static void
404 camperiphfree(struct cam_periph *periph)
405 {
406 int s;
407 struct periph_driver **p_drv;
408
409 for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
410 *p_drv != NULL; p_drv++) {
411 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
412 break;
413 }
414
415 if (periph->periph_dtor != NULL)
416 periph->periph_dtor(periph);
417
418 s = splsoftcam();
419 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
420 (*p_drv)->generation++;
421 splx(s);
422
423 xpt_remove_periph(periph);
424
425 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
426 union ccb ccb;
427 void *arg;
428
429 switch (periph->deferred_ac) {
430 case AC_FOUND_DEVICE:
431 ccb.ccb_h.func_code = XPT_GDEV_TYPE;
432 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
433 xpt_action(&ccb);
434 arg = &ccb;
435 break;
436 case AC_PATH_REGISTERED:
437 ccb.ccb_h.func_code = XPT_PATH_INQ;
438 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
439 xpt_action(&ccb);
440 arg = &ccb;
441 break;
442 default:
443 arg = NULL;
444 break;
445 }
446 periph->deferred_callback(NULL, periph->deferred_ac,
447 periph->path, arg);
448 }
449 xpt_free_path(periph->path);
450 free(periph, M_DEVBUF);
451 }
452
453 /*
454 * Wait interruptibly for an exclusive lock.
455 */
456 int
457 cam_periph_lock(struct cam_periph *periph, int priority)
458 {
459 int error;
460
461 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
462 periph->flags |= CAM_PERIPH_LOCK_WANTED;
463 if ((error = tsleep(periph, priority, "caplck", 0)) != 0)
464 return error;
465 }
466
467 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
468 return(ENXIO);
469
470 periph->flags |= CAM_PERIPH_LOCKED;
471 return 0;
472 }
473
474 /*
475 * Unlock and wake up any waiters.
476 */
477 void
478 cam_periph_unlock(struct cam_periph *periph)
479 {
480 periph->flags &= ~CAM_PERIPH_LOCKED;
481 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
482 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
483 wakeup(periph);
484 }
485
486 cam_periph_release(periph);
487 }
488
/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to DFLTPHYS memory (that is the
 * limit the code below actually enforces).  Since we use buffers to map
 * stuff in and out, we're limited to the buffer size.
 *
 * Returns 0 on success, EINVAL for an unmappable CCB type or bad match
 * buffer, E2BIG for an oversize transfer, EACCES for inaccessible user
 * memory.  On success the CCB's data pointers are rewritten to point at
 * the kernel mappings; cam_periph_unmapmem() restores them.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	/*
	 * Collect the user buffer pointers, lengths and transfer
	 * directions for each CCB type we know how to map.
	 */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		/* Nothing to map when no data phase is expected. */
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
			printf("cam_periph_mapmem: attempt to map %u bytes, "
			       "which is greater than DFLTPHYS(%d)\n",
			       lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK),
			       DFLTPHYS);
			return(E2BIG);
		}

		/* CAM_DIR_OUT means the kernel reads the user's buffer. */
		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = B_WRITE;
			if (useracc(*data_ptrs[i], lengths[i], B_READ) == 0){
				printf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for READ\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);
				return(EACCES);
			}
		}

		/*
		 * XXX this check is really bogus, since B_WRITE currently
		 * is all 0's, and so it is "set" all the time.
		 */
		if (dirs[i] & CAM_DIR_IN) {
			flags[i] |= B_READ;
			if (useracc(*data_ptrs[i], lengths[i], B_WRITE) == 0){
				printf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for WRITE\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);

				return(EACCES);
			}
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	curproc->p_flag |= P_PHYSIO;

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf();

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < DFLTPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the flags */
		mapinfo->bp[i]->b_flags = flags[i] | B_PHYS | B_BUSY;

		/* map the buffer into kernel memory */
		vmapbuf(mapinfo->bp[i]);

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		/*
		 * NOTE(review): num_bufs_used is only ever incremented
		 * here; presumably callers zero *mapinfo before calling --
		 * confirm against the pass/xpt callers.
		 */
		mapinfo->num_bufs_used++;
	}

	return(0);
}
634
/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().  Restores the user-space pointers saved in the
 * pbufs, releases the pbufs, and clears P_PHYSIO so the process may be
 * swapped again.
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		curproc->p_flag &= ~P_PHYSIO;
		return;
	}

	/*
	 * Recompute which CCB fields were remapped, mirroring the layout
	 * chosen in cam_periph_mapmem() for each CCB type.
	 */
	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		curproc->p_flag &= ~P_PHYSIO;
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* clear the flags we set above */
		mapinfo->bp[i]->b_flags &= ~(B_PHYS|B_BUSY);

		/* release the buffer */
		relpbuf(mapinfo->bp[i]);
	}

	/* allow ourselves to be swapped once again */
	curproc->p_flag &= ~P_PHYSIO;
}
691
/*
 * Obtain a CCB for this peripheral at the given priority.  Schedules
 * the peripheral with the transport and sleeps until a CCB of the
 * requested priority appears on the peripheral's ccb_list, then removes
 * and returns it.  May tsleep, so it must not be called from interrupt
 * context.
 */
union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;
	int s;

	/* NOTE(review): trace message still says "cdgetccb" (old name). */
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	s = splsoftcam();

	while (periph->ccb_list.slh_first == NULL) {
		/* Remember the most urgent (lowest) priority requested. */
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		/*
		 * xpt_schedule() may have delivered a CCB synchronously;
		 * only sleep if one at our priority didn't show up.
		 */
		if ((periph->ccb_list.slh_first != NULL)
		 && (periph->ccb_list.slh_first->pinfo.priority == priority))
			break;
		tsleep(&periph->ccb_list, PRIBIO, "cgticb", 0);
	}

	ccb_h = periph->ccb_list.slh_first;
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	splx(s);
	return ((union ccb *)ccb_h);
}
717
718 void
719 cam_periph_ccbwait(union ccb *ccb)
720 {
721 int s;
722
723 s = splsoftcam();
724 if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
725 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
726 tsleep(&ccb->ccb_h.cbfcnp, PRIBIO, "cbwait", 0);
727
728 splx(s);
729 }
730
/*
 * Generic ioctl handling for peripheral drivers.  Currently implements
 * only CAMGETPASSTHRU: walk the device list for this peripheral's device
 * until a "pass" (passthrough) peripheral is found, and copy the result
 * CCB out to the caller (empty periph_name if none exists).
 *
 * NOTE(review): error_routine is accepted for interface symmetry but is
 * not used by any case below.
 *
 * Returns 0 on success or ENOTTY for unrecognized commands.
 */
int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				/* No pass device: report an empty name. */
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
792
/*
 * Issue a CCB synchronously: send it to the transport, wait for
 * completion, and run the supplied error routine until it stops
 * requesting a retry (ERESTART).  If a devstat structure is supplied
 * and the CCB is a SCSI I/O, the transaction is recorded.  Any device
 * queue freeze left by the request is released before returning.
 *
 * Returns 0 on success or the error chosen by error_routine.
 */
int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	int error;

	error = 0;

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds);

	xpt_action(ccb);

	/*
	 * The error routine may resubmit the CCB and return ERESTART,
	 * in which case we wait for it again.
	 */
	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ?  DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ);

	return(error);
}
844
845 void
846 cam_freeze_devq(struct cam_path *path)
847 {
848 struct ccb_hdr ccb_h;
849
850 xpt_setup_ccb(&ccb_h, path, /*priority*/1);
851 ccb_h.func_code = XPT_NOOP;
852 ccb_h.flags = CAM_DEV_QFREEZE;
853 xpt_action((union ccb *)&ccb_h);
854 }
855
856 u_int32_t
857 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
858 u_int32_t openings, u_int32_t timeout,
859 int getcount_only)
860 {
861 struct ccb_relsim crs;
862
863 xpt_setup_ccb(&crs.ccb_h, path,
864 /*priority*/1);
865 crs.ccb_h.func_code = XPT_REL_SIMQ;
866 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
867 crs.release_flags = relsim_flags;
868 crs.openings = openings;
869 crs.release_timeout = timeout;
870 xpt_action((union ccb *)&crs);
871 return (crs.qfrozen_cnt);
872 }
873
#define saved_ccb_ptr ppriv_ptr0
/*
 * Completion handler for the recovery commands issued by
 * cam_periph_error() (test-unit-ready / start-unit).  When recovery
 * ends -- successfully or with no retries left -- the caller's original
 * CCB, stashed in saved_ccb_ptr by cam_periph_error(), is copied back
 * over this one and resubmitted so it completes to its original owner.
 */
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	cam_status status;
	int frozen;
	int sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t relsim_flags, timeout;
	u_int32_t qfrozen_cnt;

	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;

	/*
	 * Unfreeze the queue once if it is already frozen..
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				  /*relsim_flags*/0,
				  /*openings*/0,
				  /*timeout*/0,
				  /*getcount_only*/0);
	}

	switch (status) {

	case CAM_REQ_CMP:

		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get the
		 * inquiry information.  Many devices (mostly disks) don't
		 * properly report their inquiry information unless they
		 * are spun up.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
		}
		/* Restore the original CCB and send it on its way. */
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct scsi_sense_data *sense;
			int    error_code, sense_key, asc, ascq;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */

			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			     (asc == 0x24) && (ascq == 0x00) &&
			     (done_ccb->ccb_h.retry_count > 0)) {

				/* asc 0x24/0x00: invalid field in CDB. */
				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if (done_ccb->ccb_h.retry_count > 0) {
				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
					RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/*
				 * Copy the original CCB back and
				 * send it back to the caller.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);

		}
		break;
	default:
		/* Recovery failed outright; return the original CCB. */
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	/*
	 * Release the freeze taken for recovery, with a delayed release
	 * when relsim_flags/timeout were set in the retry branch above.
	 */
	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				  /*relsim_flags*/relsim_flags,
				  /*openings*/0,
				  /*timeout*/timeout,
				  /*getcount_only*/0);
}
1035
1036 /*
1037 * Generic Async Event handler. Peripheral drivers usually
1038 * filter out the events that require personal attention,
1039 * and leave the rest to this function.
1040 */
1041 void
1042 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1043 struct cam_path *path, void *arg)
1044 {
1045 switch (code) {
1046 case AC_LOST_DEVICE:
1047 cam_periph_invalidate(periph);
1048 break;
1049 case AC_SENT_BDR:
1050 case AC_BUS_RESET:
1051 {
1052 cam_periph_bus_settle(periph, SCSI_DELAY);
1053 break;
1054 }
1055 default:
1056 break;
1057 }
1058 }
1059
1060 void
1061 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1062 {
1063 struct ccb_getdevstats cgds;
1064
1065 xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
1066 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1067 xpt_action((union ccb *)&cgds);
1068 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1069 }
1070
/*
 * Freeze the device queue until duration_ms have elapsed since
 * *event_time (e.g. the last bus reset).  If that much time has already
 * passed, do nothing; otherwise freeze the queue and schedule it to
 * release itself after the remaining interval.
 */
void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;
	int s;

	/* delta = now - *event_time: time elapsed since the event. */
	s = splclock();
	microtime(&delta);
	splx(s);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		/* Convert the remaining settle time back to ms. */
		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}

}
1099
1100 /*
1101 * Generic error handler. Peripheral drivers usually filter
1102 * out the errors that they handle in a unique manner, then
1103 * call this function.
1104 */
1105 int
1106 cam_periph_error(union ccb *ccb, cam_flags camflags,
1107 u_int32_t sense_flags, union ccb *save_ccb)
1108 {
1109 cam_status status;
1110 int frozen;
1111 int sense;
1112 int error;
1113 int openings;
1114 int retry;
1115 u_int32_t relsim_flags;
1116 u_int32_t timeout;
1117
1118 status = ccb->ccb_h.status;
1119 frozen = (status & CAM_DEV_QFRZN) != 0;
1120 sense = (status & CAM_AUTOSNS_VALID) != 0;
1121 status &= CAM_STATUS_MASK;
1122 relsim_flags = 0;
1123
1124 switch (status) {
1125 case CAM_REQ_CMP:
1126 /* decrement the number of retries */
1127 retry = ccb->ccb_h.retry_count > 0;
1128 if (retry)
1129 ccb->ccb_h.retry_count--;
1130 error = 0;
1131 break;
1132 case CAM_AUTOSENSE_FAIL:
1133 case CAM_SCSI_STATUS_ERROR:
1134
1135 switch (ccb->csio.scsi_status) {
1136 case SCSI_STATUS_OK:
1137 case SCSI_STATUS_COND_MET:
1138 case SCSI_STATUS_INTERMED:
1139 case SCSI_STATUS_INTERMED_COND_MET:
1140 error = 0;
1141 break;
1142 case SCSI_STATUS_CMD_TERMINATED:
1143 case SCSI_STATUS_CHECK_COND:
1144 if (sense != 0) {
1145 struct scsi_sense_data *sense;
1146 int error_code, sense_key, asc, ascq;
1147 struct cam_periph *periph;
1148 scsi_sense_action err_action;
1149 struct ccb_getdev cgd;
1150
1151 sense = &ccb->csio.sense_data;
1152 scsi_extract_sense(sense, &error_code,
1153 &sense_key, &asc, &ascq);
1154 periph = xpt_path_periph(ccb->ccb_h.path);
1155
1156 /*
1157 * Grab the inquiry data for this device.
1158 */
1159 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path,
1160 /*priority*/ 1);
1161 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1162 xpt_action((union ccb *)&cgd);
1163
1164 err_action = scsi_error_action(asc, ascq,
1165 &cgd.inq_data);
1166
1167 /*
1168 * Send a Test Unit Ready to the device.
1169 * If the 'many' flag is set, we send 120
1170 * test unit ready commands, one every half
1171 * second. Otherwise, we just send one TUR.
1172 * We only want to do this if the retry
1173 * count has not been exhausted.
1174 */
1175 if (((err_action & SS_MASK) == SS_TUR)
1176 && save_ccb != NULL
1177 && ccb->ccb_h.retry_count > 0) {
1178
1179 /*
1180 * Since error recovery is already
1181 * in progress, don't attempt to
1182 * process this error. It is probably
1183 * related to the error that caused
1184 * the currently active error recovery
1185 * action. Also, we only have
1186 * space for one saved CCB, so if we
1187 * had two concurrent error recovery
1188 * actions, we would end up
1189 * over-writing one error recovery
1190 * CCB with another one.
1191 */
1192 if (periph->flags &
1193 CAM_PERIPH_RECOVERY_INPROG) {
1194 error = ERESTART;
1195 break;
1196 }
1197
1198 periph->flags |=
1199 CAM_PERIPH_RECOVERY_INPROG;
1200
1201 /* decrement the number of retries */
1202 if ((err_action &
1203 SSQ_DECREMENT_COUNT) != 0) {
1204 retry = 1;
1205 ccb->ccb_h.retry_count--;
1206 }
1207
1208 bcopy(ccb, save_ccb, sizeof(*save_ccb));
1209
1210 /*
1211 * We retry this one every half
1212 * second for a minute. If the
1213 * device hasn't become ready in a
1214 * minute's time, it's unlikely to
1215 * ever become ready. If the table
1216 * doesn't specify SSQ_MANY, we can
1217 * only try this once. Oh well.
1218 */
1219 if ((err_action & SSQ_MANY) != 0)
1220 scsi_test_unit_ready(&ccb->csio,
1221 /*retries*/120,
1222 camperiphdone,
1223 MSG_SIMPLE_Q_TAG,
1224 SSD_FULL_SIZE,
1225 /*timeout*/5000);
1226 else
1227 scsi_test_unit_ready(&ccb->csio,
1228 /*retries*/1,
1229 camperiphdone,
1230 MSG_SIMPLE_Q_TAG,
1231 SSD_FULL_SIZE,
1232 /*timeout*/5000);
1233
1234 /* release the queue after .5 sec. */
1235 relsim_flags =
1236 RELSIM_RELEASE_AFTER_TIMEOUT;
1237 timeout = 500;
1238 /*
1239 * Drop the priority to 0 so that
1240 * we are the first to execute. Also
1241 * freeze the queue after this command
1242 * is sent so that we can restore the
1243 * old csio and have it queued in the
1244 * proper order before we let normal
1245 * transactions go to the drive.
1246 */
1247 ccb->ccb_h.pinfo.priority = 0;
1248 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1249
1250 /*
1251 * Save a pointer to the original
1252 * CCB in the new CCB.
1253 */
1254 ccb->ccb_h.saved_ccb_ptr = save_ccb;
1255
1256 error = ERESTART;
1257 }
1258 /*
1259 * Send a start unit command to the device,
1260 * and then retry the command. We only
1261 * want to do this if the retry count has
1262 * not been exhausted. If the user
1263 * specified 0 retries, then we follow
1264 * their request and do not retry.
1265 */
1266 else if (((err_action & SS_MASK) == SS_START)
1267 && save_ccb != NULL
1268 && ccb->ccb_h.retry_count > 0) {
1269 int le;
1270
1271 /*
1272 * Only one error recovery action
1273 * at a time. See above.
1274 */
1275 if (periph->flags &
1276 CAM_PERIPH_RECOVERY_INPROG) {
1277 error = ERESTART;
1278 break;
1279 }
1280
1281 periph->flags |=
1282 CAM_PERIPH_RECOVERY_INPROG;
1283
1284 /* decrement the number of retries */
1285 retry = 1;
1286 ccb->ccb_h.retry_count--;
1287
1288 /*
1289 * Check for removable media and
1290 * set load/eject flag
1291 * appropriately.
1292 */
1293 if (SID_IS_REMOVABLE(&cgd.inq_data))
1294 le = TRUE;
1295 else
1296 le = FALSE;
1297
1298 /*
1299 * Attempt to start the drive up.
1300 *
1301 * Save the current ccb so it can
1302 * be restored and retried once the
1303 * drive is started up.
1304 */
1305 bcopy(ccb, save_ccb, sizeof(*save_ccb));
1306
1307 scsi_start_stop(&ccb->csio,
1308 /*retries*/1,
1309 camperiphdone,
1310 MSG_SIMPLE_Q_TAG,
1311 /*start*/TRUE,
1312 /*load/eject*/le,
1313 /*immediate*/FALSE,
1314 SSD_FULL_SIZE,
1315 /*timeout*/50000);
1316 /*
1317 * Drop the priority to 0 so that
1318 * we are the first to execute. Also
1319 * freeze the queue after this command
1320 * is sent so that we can restore the
1321 * old csio and have it queued in the
1322 * proper order before we let normal
1323 * transactions go to the drive.
1324 */
1325 ccb->ccb_h.pinfo.priority = 0;
1326 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1327
1328 /*
1329 * Save a pointer to the original
1330 * CCB in the new CCB.
1331 */
1332 ccb->ccb_h.saved_ccb_ptr = save_ccb;
1333
1334 error = ERESTART;
1335 } else if ((sense_flags & SF_RETRY_UA) != 0) {
1336 /*
1337 * XXX KDM this is a *horrible*
1338 * hack.
1339 */
1340 error = scsi_interpret_sense(ccb,
1341 sense_flags,
1342 &relsim_flags,
1343 &openings,
1344 &timeout,
1345 err_action);
1346 }
1347
1348 /*
1349 * Theoretically, this code should send a
1350 * test unit ready to the given device, and
1351			 * if it returns an error, send a start
1352 * unit command. Since we don't yet have
1353 * the capability to do two-command error
1354 * recovery, just send a start unit.
1355 * XXX KDM fix this!
1356 */
1357 else if (((err_action & SS_MASK) == SS_TURSTART)
1358 && save_ccb != NULL
1359 && ccb->ccb_h.retry_count > 0) {
1360 int le;
1361
1362 /*
1363 * Only one error recovery action
1364 * at a time. See above.
1365 */
1366 if (periph->flags &
1367 CAM_PERIPH_RECOVERY_INPROG) {
1368 error = ERESTART;
1369 break;
1370 }
1371
1372 periph->flags |=
1373 CAM_PERIPH_RECOVERY_INPROG;
1374
1375 /* decrement the number of retries */
1376 retry = 1;
1377 ccb->ccb_h.retry_count--;
1378
1379 /*
1380 * Check for removable media and
1381 * set load/eject flag
1382 * appropriately.
1383 */
1384 if (SID_IS_REMOVABLE(&cgd.inq_data))
1385 le = TRUE;
1386 else
1387 le = FALSE;
1388
1389 /*
1390 * Attempt to start the drive up.
1391 *
1392 * Save the current ccb so it can
1393 * be restored and retried once the
1394 * drive is started up.
1395 */
1396 bcopy(ccb, save_ccb, sizeof(*save_ccb));
1397
1398 scsi_start_stop(&ccb->csio,
1399 /*retries*/1,
1400 camperiphdone,
1401 MSG_SIMPLE_Q_TAG,
1402 /*start*/TRUE,
1403 /*load/eject*/le,
1404 /*immediate*/FALSE,
1405 SSD_FULL_SIZE,
1406 /*timeout*/50000);
1407
1408 /* release the queue after .5 sec. */
1409 relsim_flags =
1410 RELSIM_RELEASE_AFTER_TIMEOUT;
1411 timeout = 500;
1412 /*
1413 * Drop the priority to 0 so that
1414 * we are the first to execute. Also
1415 * freeze the queue after this command
1416 * is sent so that we can restore the
1417 * old csio and have it queued in the
1418 * proper order before we let normal
1419 * transactions go to the drive.
1420 */
1421 ccb->ccb_h.pinfo.priority = 0;
1422 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1423
1424 /*
1425 * Save a pointer to the original
1426 * CCB in the new CCB.
1427 */
1428 ccb->ccb_h.saved_ccb_ptr = save_ccb;
1429
1430 error = ERESTART;
1431 } else {
1432 error = scsi_interpret_sense(ccb,
1433 sense_flags,
1434 &relsim_flags,
1435 &openings,
1436 &timeout,
1437 err_action);
1438 }
1439 } else if (ccb->csio.scsi_status ==
1440 SCSI_STATUS_CHECK_COND
1441 && status != CAM_AUTOSENSE_FAIL) {
1442 /* no point in decrementing the retry count */
1443 panic("cam_periph_error: scsi status of "
1444 "CHECK COND returned but no sense "
1445 "information is availible. "
1446 "Controller should have returned "
1447 "CAM_AUTOSENSE_FAILED");
1448 /* NOTREACHED */
1449 error = EIO;
1450 } else if (ccb->ccb_h.retry_count == 0) {
1451 /*
1452 * XXX KDM shouldn't there be a better
1453 * argument to return??
1454 */
1455 error = EIO;
1456 } else {
1457 /* decrement the number of retries */
1458 retry = ccb->ccb_h.retry_count > 0;
1459 if (retry)
1460 ccb->ccb_h.retry_count--;
1461 /*
1462 * If it was aborted with no
1463 * clue as to the reason, just
1464 * retry it again.
1465 */
1466 error = ERESTART;
1467 }
1468 break;
1469 case SCSI_STATUS_QUEUE_FULL:
1470 {
1471 /* no decrement */
1472 struct ccb_getdevstats cgds;
1473
1474 /*
1475 * First off, find out what the current
1476 * transaction counts are.
1477 */
1478 xpt_setup_ccb(&cgds.ccb_h,
1479 ccb->ccb_h.path,
1480 /*priority*/1);
1481 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1482 xpt_action((union ccb *)&cgds);
1483
1484 /*
1485 * If we were the only transaction active, treat
1486 * the QUEUE FULL as if it were a BUSY condition.
1487 */
1488 if (cgds.dev_active != 0) {
1489 int total_openings;
1490
1491 /*
1492 * Reduce the number of openings to
1493 * be 1 less than the amount it took
1494 * to get a queue full bounded by the
1495 * minimum allowed tag count for this
1496 * device.
1497 */
1498 total_openings =
1499 cgds.dev_active+cgds.dev_openings;
1500 openings = cgds.dev_active;
1501 if (openings < cgds.mintags)
1502 openings = cgds.mintags;
1503 if (openings < total_openings)
1504 relsim_flags = RELSIM_ADJUST_OPENINGS;
1505 else {
1506 /*
1507 * Some devices report queue full for
1508 * temporary resource shortages. For
1509 * this reason, we allow a minimum
1510 * tag count to be entered via a
1511 * quirk entry to prevent the queue
1512 * count on these devices from falling
1513				 * to a pessimistically low value.  We
1514 * still wait for the next successful
1515 * completion, however, before queueing
1516 * more transactions to the device.
1517 */
1518 relsim_flags =
1519 RELSIM_RELEASE_AFTER_CMDCMPLT;
1520 }
1521 timeout = 0;
1522 error = ERESTART;
1523 break;
1524 }
1525 /* FALLTHROUGH */
1526 }
1527 case SCSI_STATUS_BUSY:
1528 /*
1529 * Restart the queue after either another
1530 * command completes or a 1 second timeout.
1531 * If we have any retries left, that is.
1532 */
1533 retry = ccb->ccb_h.retry_count > 0;
1534 if (retry) {
1535 ccb->ccb_h.retry_count--;
1536 error = ERESTART;
1537 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1538 | RELSIM_RELEASE_AFTER_CMDCMPLT;
1539 timeout = 1000;
1540 } else {
1541 error = EIO;
1542 }
1543 break;
1544 case SCSI_STATUS_RESERV_CONFLICT:
1545 error = EIO;
1546 break;
1547 default:
1548 error = EIO;
1549 break;
1550 }
1551 break;
1552 case CAM_REQ_CMP_ERR:
1553 case CAM_CMD_TIMEOUT:
1554 case CAM_UNEXP_BUSFREE:
1555 case CAM_UNCOR_PARITY:
1556 case CAM_DATA_RUN_ERR:
1557 /* decrement the number of retries */
1558 retry = ccb->ccb_h.retry_count > 0;
1559 if (retry) {
1560 ccb->ccb_h.retry_count--;
1561 error = ERESTART;
1562 } else {
1563 error = EIO;
1564 }
1565 break;
1566 case CAM_UA_ABORT:
1567 case CAM_UA_TERMIO:
1568 case CAM_MSG_REJECT_REC:
1569 /* XXX Don't know that these are correct */
1570 error = EIO;
1571 break;
1572 case CAM_SEL_TIMEOUT:
1573 {
1574 /*
1575 * XXX
1576 * A single selection timeout should not be enough
1577 * to invalidate a device. We should retry for multiple
1578 * seconds assuming this isn't a probe. We'll probably
1579 * need a special flag for that.
1580 */
1581 #if 0
1582 struct cam_path *newpath;
1583
1584 /* Should we do more if we can't create the path?? */
1585 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
1586 xpt_path_path_id(ccb->ccb_h.path),
1587 xpt_path_target_id(ccb->ccb_h.path),
1588 CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1589 break;
1590 /*
1591 * Let peripheral drivers know that this device has gone
1592 * away.
1593 */
1594 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1595 xpt_free_path(newpath);
1596 #endif
1597 if ((sense_flags & SF_RETRY_SELTO) != 0) {
1598 retry = ccb->ccb_h.retry_count > 0;
1599 if (retry) {
1600 ccb->ccb_h.retry_count--;
1601 error = ERESTART;
1602 /*
1603 * Wait half a second to give the device
1604 * time to recover before we try again.
1605 */
1606 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1607 timeout = 500;
1608 } else {
1609 error = ENXIO;
1610 }
1611 } else {
1612 error = ENXIO;
1613 }
1614 break;
1615 }
1616 case CAM_REQ_INVALID:
1617 case CAM_PATH_INVALID:
1618 case CAM_DEV_NOT_THERE:
1619 case CAM_NO_HBA:
1620 case CAM_PROVIDE_FAIL:
1621 case CAM_REQ_TOO_BIG:
1622 error = EINVAL;
1623 break;
1624 case CAM_SCSI_BUS_RESET:
1625 case CAM_BDR_SENT:
1626 case CAM_REQUEUE_REQ:
1627 /* Unconditional requeue, dammit */
1628 error = ERESTART;
1629 break;
1630 case CAM_RESRC_UNAVAIL:
1631 case CAM_BUSY:
1632 /* timeout??? */
1633 default:
1634 /* decrement the number of retries */
1635 retry = ccb->ccb_h.retry_count > 0;
1636 if (retry) {
1637 ccb->ccb_h.retry_count--;
1638 error = ERESTART;
1639 } else {
1640 /* Check the sense codes */
1641 error = EIO;
1642 }
1643 break;
1644 }
1645
1646 /* Attempt a retry */
1647 if (error == ERESTART || error == 0) {
1648 if (frozen != 0)
1649 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1650
1651 if (error == ERESTART)
1652 xpt_action(ccb);
1653
1654 if (frozen != 0) {
1655 cam_release_devq(ccb->ccb_h.path,
1656 relsim_flags,
1657 openings,
1658 timeout,
1659 /*getcount_only*/0);
1660 }
1661 }
1662
1663
1664 return (error);
1665 }
Cache object: 2a8a2b391e50771b86fef6ef369b1b7f
|